Merge tag 'sound-4.12-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 10 Jun 2017 05:15:08 +0000 (22:15 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 10 Jun 2017 05:15:08 +0000 (22:15 -0700)
Pull sound fixes from Takashi Iwai:
 "This update contains a slightly hight amount of changes due to the
  pending ASoC fixes:

   - ALSA timer core got a couple of fixes for races between read and
     ioctl, leading to potential read of uninitialized kmalloced memory

   - ASoC core fixed its de-registration pattern to address a
     use-after-free bug

   - The rewrite of the probe code in ASoC Intel Skylake for the i915
     component

   - ASoC R-snd got a series of fixes for SSI

   - ASoC simple-card, atmel, da7213, and rt286 trivial fixes

   - HD-audio ALC269 quirk and rearrangement of quirk table"

* tag 'sound-4.12-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound:
  ALSA: timer: Fix missing queue indices reset at SNDRV_TIMER_IOCTL_SELECT
  ALSA: timer: Fix race between read and ioctl
  ALSA: hda/realtek - Reorder ALC269 ASUS quirk entries
  ALSA: hda/realtek: Fix mic and headset jack sense on Asus X705UD
  ASoC: rsnd: fixup parent_clk_name of AUDIO_CLKOUTx
  ASoC: Intel: Skylake: Fix to parse consecutive string tkns in manifest
  ASoC: Intel: Skylake: Fix IPC rx_list corruption
  ASoC: rsnd: SSI PIO adjust to 24bit mode
  MAINTAINERS: Update email address for patches to Wolfson parts
  ASoC: Fix use-after-free at card unregistration
  ASoC: simple-card: fix mic jack initialization
  ASoC: rsnd: don't call free_irq() on Parent SSI
  ASoC: atmel-classd: sync regcache when resuming
  ASoC: rsnd: don't use PDTA bit for 24bit on SSI
  ASoC: da7213: Fix incorrect usage of bitwise '&' operator for SRM check
  rt286: add Thinkpad Helix 2 to force_combo_jack_table
  ASoC: Intel: Skylake: Move i915 registration to worker thread

694 files changed:
Documentation/acpi/acpi-lid.txt
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/pm/cpufreq.rst
Documentation/admin-guide/pm/index.rst
Documentation/admin-guide/pm/intel_pstate.rst [new file with mode: 0644]
Documentation/cpu-freq/intel-pstate.txt [deleted file]
Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt
Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
Documentation/devicetree/bindings/net/dsa/marvell.txt
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
Documentation/input/devices/edt-ft5x06.rst
Documentation/networking/dpaa.txt [new file with mode: 0644]
Documentation/networking/tcp.txt
MAINTAINERS
Makefile
arch/arm/boot/compressed/efi-header.S
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/imx6ul-14x14-evk.dts
arch/arm/boot/dts/keystone-k2l-netcp.dtsi
arch/arm/boot/dts/keystone-k2l.dtsi
arch/arm/boot/dts/versatile-pb.dts
arch/arm/common/mcpm_entry.c
arch/arm/include/asm/pgtable-nommu.h
arch/arm/mach-at91/Kconfig
arch/arm/mach-davinci/pm.c
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
arch/arm64/boot/dts/hisilicon/hi6220.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/acpi.h
arch/arm64/kernel/pci.c
arch/frv/include/asm/timex.h
arch/mips/kernel/process.c
arch/openrisc/kernel/process.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/64/hash-4k.h
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/topology.h
arch/powerpc/include/uapi/asm/cputable.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/perf/power9-pmu.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/powernv/npu-dma.c
arch/powerpc/platforms/powernv/subcore.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/sysdev/simple_gpio.c
arch/sparc/Kconfig
arch/sparc/include/asm/mmu_64.h
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/include/asm/pil.h
arch/sparc/include/asm/vio.h
arch/sparc/kernel/ds.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/kernel.h
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/tsb.S
arch/sparc/kernel/ttable_64.S
arch/sparc/kernel/vio.c
arch/sparc/lib/Makefile
arch/sparc/lib/multi3.S [new file with mode: 0644]
arch/sparc/mm/init_64.c
arch/sparc/mm/tsb.c
arch/sparc/mm/ultra.S
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/include/asm/mce.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/process_32.c
arch/x86/kernel/setup.c
arch/x86/kernel/unwind_frame.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/pageattr.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk.h
block/cfq-iosched.c
block/partition-generic.c
block/partitions/msdos.c
crypto/asymmetric_keys/public_key.c
crypto/drbg.c
crypto/gcm.c
crypto/skcipher.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/battery.c
drivers/acpi/button.c
drivers/acpi/device_pm.c
drivers/acpi/nfit/mce.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/ata/ahci.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/sata_mv.c
drivers/ata/sata_rcar.c
drivers/base/power/main.c
drivers/base/power/wakeup.c
drivers/block/nbd.c
drivers/block/rbd.c
drivers/char/pcmcia/cm4040_cs.c
drivers/char/random.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/dma/ep93xx_dma.c
drivers/dma/mv_xor_v2.c
drivers/dma/pl330.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/sh/usb-dmac.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/efi-bgrt.c
drivers/firmware/efi/efi-pstore.c
drivers/firmware/efi/libstub/secureboot.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uc.h
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fence.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/hid/Kconfig
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-elecom.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/wacom_wac.c
drivers/hwmon/Kconfig
drivers/hwmon/aspeed-pwm-tacho.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-tiny-usb.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/netlink.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/uverbs_marshall.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/chip_registers.h
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/intr.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/sysfs.c
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_osdep.h
drivers/infiniband/hw/i40iw/i40iw_type.h
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/keyboard/tm2-touchkey.c
drivers/input/misc/axp20x-pek.c
drivers/input/mouse/elan_i2c_i2c.c
drivers/input/mouse/synaptics.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/input/touchscreen/silead.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/mISDN/stack.c
drivers/leds/leds-pca955x.c
drivers/md/bitmap.c
drivers/md/dm-bufio.c
drivers/md/dm-integrity.c
drivers/md/dm-ioctl.c
drivers/md/dm-raid1.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/raid5-cache.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/media/Kconfig
drivers/media/Makefile
drivers/media/cec/Kconfig
drivers/media/cec/Makefile
drivers/media/cec/cec-adap.c
drivers/media/cec/cec-core.c
drivers/media/i2c/Kconfig
drivers/media/platform/Kconfig
drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
drivers/media/platform/vivid/Kconfig
drivers/media/rc/rc-ir-raw.c
drivers/media/usb/pulse8-cec/Kconfig
drivers/media/usb/rainshadow-cec/Kconfig
drivers/media/usb/rainshadow-cec/rainshadow-cec.c
drivers/memory/atmel-ebi.c
drivers/misc/cxl/file.c
drivers/misc/cxl/native.c
drivers/misc/sgi-xp/xp.h
drivers/misc/sgi-xp/xp_main.c
drivers/mmc/core/pwrseq_simple.c
drivers/mmc/host/cavium-octeon.c
drivers/mmc/host/cavium-thunderx.c
drivers/mmc/host/cavium.c
drivers/mmc/host/sdhci-iproc.c
drivers/mmc/host/sdhci-xenon-phy.c
drivers/mmc/host/sdhci-xenon.c
drivers/mmc/host/sdhci-xenon.h
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nand_samsung.c
drivers/mtd/nand/tango_nand.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/mv88e6xxx/global2.h
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qualcomm/emac/emac-mac.c
drivers/net/ethernet/qualcomm/emac/emac-phy.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/geneve.c
drivers/net/gtp.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/phy/Kconfig
drivers/net/phy/marvell.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/smsc95xx.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/loop.c
drivers/of/platform.c
drivers/pci/dwc/pci-imx6.c
drivers/pci/endpoint/Kconfig
drivers/pci/pci.c
drivers/pci/switch/switchtec.c
drivers/perf/arm_pmu_acpi.c
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-mxs.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinconf-generic.c
drivers/pinctrl/pinmux.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
drivers/powercap/powercap_sys.c
drivers/reset/hisilicon/hi6220_reset.c
drivers/rtc/rtc-cmos.c
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_disc.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_nvmet.h
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/qedi/qedi.h
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/ufs/ufshcd.c
drivers/staging/media/atomisp/i2c/Makefile
drivers/staging/media/atomisp/i2c/imx/Makefile
drivers/staging/media/atomisp/i2c/ov5693/Makefile
drivers/staging/media/atomisp/pci/atomisp2/Makefile
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl0.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/thermal/broadcom/Kconfig
drivers/thermal/qoriq_thermal.c
drivers/thermal/thermal_core.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/tty/ehv_bytechan.c
drivers/tty/serdev/core.c
drivers/tty/serdev/serdev-ttyport.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/altera_jtaguart.c
drivers/tty/serial/altera_uart.c
drivers/tty/serial/efm32-uart.c
drivers/tty/serial/ifx6x60.c
drivers/tty/serial/imx.c
drivers/tty/serial/serial_core.c
drivers/tty/tty_port.c
drivers/xen/privcmd.c
fs/ceph/file.c
fs/dax.c
fs/gfs2/log.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/internal.h
fs/nfs/namespace.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4client.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/super.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfsxdr.c
fs/ntfs/namei.c
fs/ocfs2/export.c
fs/overlayfs/Kconfig
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/proc/base.c
fs/reiserfs/journal.c
fs/ufs/super.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_refcount.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_file.c
fs/xfs/xfs_fsmap.c
include/drm/drm_dp_helper.h
include/linux/blk-mq.h
include/linux/ceph/ceph_debug.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compiler-clang.h
include/linux/filter.h
include/linux/gfp.h
include/linux/gpio/machine.h
include/linux/if_vlan.h
include/linux/jiffies.h
include/linux/memblock.h
include/linux/mlx4/qp.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/netfilter/x_tables.h
include/linux/netfilter_bridge/ebtables.h
include/linux/of_platform.h
include/linux/pci.h
include/linux/pinctrl/pinconf-generic.h
include/linux/ptrace.h
include/linux/serdev.h
include/linux/sunrpc/svc.h
include/linux/suspend.h
include/linux/tty.h
include/linux/usb/usbnet.h
include/media/cec-notifier.h
include/media/cec.h
include/net/dst.h
include/net/ip_fib.h
include/net/ipv6.h
include/net/netfilter/nf_conntrack_helper.h
include/net/netfilter/nf_tables.h
include/net/tc_act/tc_csum.h
include/net/tcp.h
include/net/xfrm.h
include/rdma/ib_sa.h
include/rdma/rdma_netlink.h
include/target/iscsi/iscsi_target_core.h
kernel/bpf/arraymap.c
kernel/bpf/lpm_trie.c
kernel/bpf/stackmap.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/fork.c
kernel/kprobes.c
kernel/livepatch/Kconfig
kernel/locking/rtmutex.c
kernel/power/process.c
kernel/power/snapshot.c
kernel/power/suspend.c
kernel/printk/printk.c
kernel/ptrace.c
kernel/sched/cpufreq_schedutil.c
kernel/time/posix-cpu-timers.c
kernel/trace/ftrace.c
lib/test_bpf.c
mm/gup.c
mm/hugetlb.c
mm/ksm.c
mm/memblock.c
mm/memory-failure.c
mm/memory.c
mm/mlock.c
mm/page_alloc.c
mm/slub.c
mm/util.c
net/bridge/br_netlink.c
net/bridge/br_stp_if.c
net/bridge/br_stp_timer.c
net/bridge/netfilter/ebt_arpreply.c
net/bridge/netfilter/ebtables.c
net/ceph/auth_x.c
net/ceph/ceph_common.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osdmap.c
net/core/devlink.c
net/core/dst.c
net/core/filter.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sysctl_net_core.c
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/legacy.c
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/esp4.c
net/ipv4/fib_semantics.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv6/calipso.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/xfrm6_mode_ro.c
net/ipv6/xfrm6_mode_transport.c
net/key/af_key.c
net/llc/af_llc.c
net/mac80211/agg-tx.c
net/mac80211/ht.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mpls/af_mpls.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_bitwise.c
net/netfilter/nft_cmp.c
net/netfilter/nft_ct.c
net/netfilter/nft_immediate.c
net/netfilter/nft_range.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_rbtree.c
net/netfilter/x_tables.c
net/netfilter/xt_CT.c
net/netlink/af_netlink.c
net/openvswitch/conntrack.c
net/sched/cls_matchall.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
net/sunrpc/xprtrdma/backchannel.c
net/sunrpc/xprtsock.c
net/vmw_vsock/af_vsock.c
net/wireless/scan.c
net/wireless/util.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
scripts/gdb/linux/dmesg.py
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/arch/x86/include/asm/required-features.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/include/linux/filter.h
tools/include/uapi/linux/stat.h
tools/perf/Documentation/perf-script.txt
tools/perf/builtin-script.c
tools/perf/ui/hist.c
tools/perf/util/callchain.c
tools/perf/util/evsel_fprintf.c
tools/perf/util/srcline.c
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libunwind-local.c
tools/power/acpi/.gitignore [new file with mode: 0644]
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
usr/Kconfig

index 22cb3091f29776b2acfb0df339db150393a8ad83..effe7af3a5af95d25f86efadaa7dd2b127a0dea9 100644 (file)
@@ -59,20 +59,28 @@ button driver uses the following 3 modes in order not to trigger issues.
 If the userspace hasn't been prepared to ignore the unreliable "opened"
 events and the unreliable initial state notification, Linux users can use
 the following kernel parameters to handle the possible issues:
-A. button.lid_init_state=open:
+A. button.lid_init_state=method:
+   When this option is specified, the ACPI button driver reports the
+   initial lid state using the return value of the _LID control method,
+   while whether the "opened"/"closed" events are paired relies fully on
+   the firmware implementation.
+   This option can be used to fix some platforms where the return value
+   of the _LID control method is reliable but the initial lid state
+   notification is missing.
+   This option is the default behavior during the period in which the
+   userspace isn't ready to handle the buggy AML tables.
+B. button.lid_init_state=open:
    When this option is specified, the ACPI button driver always reports the
   initial lid state as "opened", and whether the "opened"/"closed" events
   are paired relies fully on the firmware implementation.
   This may fix some platforms where the return value of the _LID
   control method is not reliable and the initial lid state notification is
   missing.
-   This option is the default behavior during the period the userspace
-   isn't ready to handle the buggy AML tables.
 
 If the userspace has been prepared to ignore the unreliable "opened" events
 and the unreliable initial state notification, Linux users should always
 use the following kernel parameter:
-B. button.lid_init_state=ignore:
+C. button.lid_init_state=ignore:
    When this option is specified, the ACPI button driver never reports the
    initial lid state and there is a compensation mechanism implemented to
   ensure that the reliable "closed" notifications can always be delivered
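
As an illustrative sketch (not part of this patch): which lid_init_state
mode a running kernel uses can be read back through the button driver's
module parameter, assuming the usual sysfs location for module parameters:

   # Sketch: print the current button.lid_init_state setting.
   with open("/sys/module/button/parameters/lid_init_state") as f:
       print(f.read().strip())
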
index 15f79c27748df1611b1643b77ca68e2a5e7cfaab..0f5c3b4347c6f94a82385f193e76bc99dba19db5 100644 (file)
 
        dscc4.setup=    [NET]
 
+       dt_cpu_ftrs=    [PPC]
+                       Format: {"off" | "known"}
+                       Control how the dt_cpu_ftrs device-tree binding is
+                       used for CPU feature discovery and setup (if it
+                       exists).
+                       off: Do not use it, fall back to legacy cpu table.
+                       known: Do not pass through unknown features to guests
+                       or userspace, only those that the kernel is aware of.
+
        dump_apple_properties   [X86]
                        Dump name and content of EFI device properties on
                        x86 Macs.  Useful for driver authors to determine
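
As an illustrative sketch (not part of this patch), the dt_cpu_ftrs=
setting of a running kernel can be checked from /proc/cmdline:

   # Sketch: report any dt_cpu_ftrs= option on the kernel command line.
   with open("/proc/cmdline") as f:
       opts = f.read().split()
   print([o for o in opts if o.startswith("dt_cpu_ftrs=")])
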
index 289c80f7760ebb966b13d91b5627e61963e5b3aa..09aa2e9497875acec984dc68727e14139c5d23ad 100644 (file)
@@ -1,4 +1,5 @@
 .. |struct cpufreq_policy| replace:: :c:type:`struct cpufreq_policy <cpufreq_policy>`
+.. |intel_pstate| replace:: :doc:`intel_pstate <intel_pstate>`
 
 =======================
 CPU Performance Scaling
@@ -75,7 +76,7 @@ feedback registers, as that information is typically specific to the hardware
 interface it comes from and may not be easily represented in an abstract,
 platform-independent way.  For this reason, ``CPUFreq`` allows scaling drivers
 to bypass the governor layer and implement their own performance scaling
-algorithms.  That is done by the ``intel_pstate`` scaling driver.
+algorithms.  That is done by the |intel_pstate| scaling driver.
 
 
 ``CPUFreq`` Policy Objects
@@ -174,13 +175,13 @@ necessary to restart the scaling governor so that it can take the new online CPU
 into account.  That is achieved by invoking the governor's ``->stop()`` and
 ``->start()`` callbacks, in this order, for the entire policy.
 
-As mentioned before, the ``intel_pstate`` scaling driver bypasses the scaling
+As mentioned before, the |intel_pstate| scaling driver bypasses the scaling
 governor layer of ``CPUFreq`` and provides its own P-state selection algorithms.
-Consequently, if ``intel_pstate`` is used, scaling governors are not attached to
+Consequently, if |intel_pstate| is used, scaling governors are not attached to
 new policy objects.  Instead, the driver's ``->setpolicy()`` callback is invoked
 to register per-CPU utilization update callbacks for each policy.  These
 callbacks are invoked by the CPU scheduler in the same way as for scaling
-governors, but in the ``intel_pstate`` case they both determine the P-state to
+governors, but in the |intel_pstate| case they both determine the P-state to
 use and change the hardware configuration accordingly in one go from scheduler
 context.
 
@@ -257,7 +258,7 @@ are the following:
 
 ``scaling_available_governors``
        List of ``CPUFreq`` scaling governors present in the kernel that can
-       be attached to this policy or (if the ``intel_pstate`` scaling driver is
+       be attached to this policy or (if the |intel_pstate| scaling driver is
        in use) list of scaling algorithms provided by the driver that can be
        applied to this policy.
 
@@ -274,7 +275,7 @@ are the following:
        the CPU is actually running at (due to hardware design and other
        limitations).
 
-       Some scaling drivers (e.g. ``intel_pstate``) attempt to provide
+       Some scaling drivers (e.g. |intel_pstate|) attempt to provide
        information more precisely reflecting the current CPU frequency through
        this attribute, but that still may not be the exact current CPU
        frequency as seen by the hardware at the moment.
@@ -284,13 +285,13 @@ are the following:
 
 ``scaling_governor``
        The scaling governor currently attached to this policy or (if the
-       ``intel_pstate`` scaling driver is in use) the scaling algorithm
+       |intel_pstate| scaling driver is in use) the scaling algorithm
        provided by the driver that is currently applied to this policy.
 
        This attribute is read-write and writing to it will cause a new scaling
        governor to be attached to this policy or a new scaling algorithm
        provided by the scaling driver to be applied to it (in the
-       ``intel_pstate`` case), as indicated by the string written to this
+       |intel_pstate| case), as indicated by the string written to this
        attribute (which must be one of the names listed by the
        ``scaling_available_governors`` attribute described above).
 
@@ -619,7 +620,7 @@ This file is located under :file:`/sys/devices/system/cpu/cpufreq/` and controls
 the "boost" setting for the whole system.  It is not present if the underlying
 scaling driver does not support the frequency boost mechanism (or supports it,
 but provides a driver-specific interface for controlling it, like
-``intel_pstate``).
+|intel_pstate|).
 
 If the value in this file is 1, the frequency boost mechanism is enabled.  This
 means that either the hardware can be put into states in which it is able to
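
As an illustrative sketch (not part of this patch), the system-wide boost
switch described above, when exposed, can be read like any other sysfs
attribute:

   # Sketch: print the global frequency boost setting, if present.
   try:
       with open("/sys/devices/system/cpu/cpufreq/boost") as f:
           print("boost =", f.read().strip())
   except FileNotFoundError:
       print("no global boost attribute (driver-specific control?)")
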
index c80f087321fcb704e549254c70ccb927f9659b4c..7f148f76f432cbfc62ed663a6be7b9849b66b3c6 100644 (file)
@@ -6,6 +6,7 @@ Power Management
    :maxdepth: 2
 
    cpufreq
+   intel_pstate
 
 .. only::  subproject and html
 
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
new file mode 100644 (file)
index 0000000..33d7039
--- /dev/null
@@ -0,0 +1,755 @@
+===============================================
+``intel_pstate`` CPU Performance Scaling Driver
+===============================================
+
+::
+
+ Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+
+General Information
+===================
+
+``intel_pstate`` is a part of the
+:doc:`CPU performance scaling subsystem <cpufreq>` in the Linux kernel
+(``CPUFreq``).  It is a scaling driver for the Sandy Bridge and later
+generations of Intel processors.  Note, however, that some of those processors
+may not be supported.  [To understand ``intel_pstate`` it is necessary to know
+how ``CPUFreq`` works in general, so this is the time to read :doc:`cpufreq` if
+you have not done that yet.]
+
+For the processors supported by ``intel_pstate``, the P-state concept is broader
+than just an operating frequency or an operating performance point (see the
+`LinuxCon Europe 2015 presentation by Kristen Accardi <LCEU2015_>`_ for more
+information about that).  For this reason, the representation of P-states used
+by ``intel_pstate`` internally follows the hardware specification (for details
+refer to `Intel® 64 and IA-32 Architectures Software Developer’s Manual
+Volume 3: System Programming Guide <SDM_>`_).  However, the ``CPUFreq`` core
+uses frequencies for identifying operating performance points of CPUs and
+frequencies are involved in the user space interface exposed by it, so
+``intel_pstate`` maps its internal representation of P-states to frequencies too
+(fortunately, that mapping is unambiguous).  At the same time, it would not be
+practical for ``intel_pstate`` to supply the ``CPUFreq`` core with a table of
+available frequencies due to the possible size of it, so the driver does not do
+that.  Some functionality of the core is limited by that.
+
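+As a purely illustrative sketch (not part of the driver), the mapping can
+be pictured as applying a processor-specific multiplier to the internal
+P-state value; the 100 MHz factor assumed below is hypothetical and need
+not match any particular CPU::
+
+   # Hypothetical sketch: convert an internal P-state value to a
+   # frequency, assuming a 100 MHz (100000 kHz) per-unit multiplier.
+   def pstate_to_khz(pstate, multiplier_khz=100000):
+       return pstate * multiplier_khz
+
+   print(pstate_to_khz(24))  # -> 2400000 kHz, i.e. 2.4 GHz
+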
+Since the hardware P-state selection interface used by ``intel_pstate`` is
+available at the logical CPU level, the driver always works with individual
+CPUs.  Consequently, if ``intel_pstate`` is in use, every ``CPUFreq`` policy
+object corresponds to one logical CPU and ``CPUFreq`` policies are effectively
+equivalent to CPUs.  In particular, this means that they become "inactive" every
+time the corresponding CPU is taken offline and need to be re-initialized when
+it goes back online.
+
+``intel_pstate`` is not modular, so it cannot be unloaded, which means that the
+only way to pass early-configuration-time parameters to it is via the kernel
+command line.  However, its configuration can be adjusted via ``sysfs`` to a
+great extent.  In some configurations it even is possible to unregister it via
+``sysfs`` which allows another ``CPUFreq`` scaling driver to be loaded and
+registered (see `below <status_attr_>`_).
+
+
+Operation Modes
+===============
+
+``intel_pstate`` can operate in three different modes: in the active mode with
+or without hardware-managed P-states support and in the passive mode.  Which of
+them will be in effect depends on what kernel command line options are used and
+on the capabilities of the processor.
+
+Active Mode
+-----------
+
+This is the default operation mode of ``intel_pstate``.  If it works in this
+mode, the ``scaling_driver`` policy attribute in ``sysfs`` for all ``CPUFreq``
+policies contains the string "intel_pstate".
+
+In this mode the driver bypasses the scaling governors layer of ``CPUFreq`` and
+provides its own scaling algorithms for P-state selection.  Those algorithms
+can be applied to ``CPUFreq`` policies in the same way as generic scaling
+governors (that is, through the ``scaling_governor`` policy attribute in
+``sysfs``).  [Note that different P-state selection algorithms may be chosen for
+different policies, but that is not recommended.]
+
+They are not generic scaling governors, but their names are the same as the
+names of some of those governors.  Moreover, confusingly enough, they generally
+do not work in the same way as the generic governors they share the names with.
+For example, the ``powersave`` P-state selection algorithm provided by
+``intel_pstate`` is not a counterpart of the generic ``powersave`` governor
+(roughly, it corresponds to the ``schedutil`` and ``ondemand`` governors).
+
+There are two P-state selection algorithms provided by ``intel_pstate`` in the
+active mode: ``powersave`` and ``performance``.  The way they both operate
+depends on whether or not the hardware-managed P-states (HWP) feature has been
+enabled in the processor and possibly on the processor model.
+
+Which of the P-state selection algorithms is used by default depends on the
+:c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option.
+Namely, if that option is set, the ``performance`` algorithm will be used by
+default, and the other one will be used by default if it is not set.
+
+Active Mode With HWP
+~~~~~~~~~~~~~~~~~~~~
+
+If the processor supports the HWP feature, it will be enabled during the
+processor initialization and cannot be disabled after that.  It is possible
+to avoid enabling it by passing the ``intel_pstate=no_hwp`` argument to the
+kernel in the command line.
+
+If the HWP feature has been enabled, ``intel_pstate`` relies on the processor to
+select P-states by itself, but still it can give hints to the processor's
+internal P-state selection logic.  What those hints are depends on which P-state
+selection algorithm has been applied to the given policy (or to the CPU it
+corresponds to).
+
+Even though the P-state selection is carried out by the processor automatically,
+``intel_pstate`` registers utilization update callbacks with the CPU scheduler
+in this mode.  However, they are not used for running a P-state selection
+algorithm, but for periodic updates of the current CPU frequency information to
+be made available from the ``scaling_cur_freq`` policy attribute in ``sysfs``.
+
+HWP + ``performance``
+.....................
+
+In this configuration ``intel_pstate`` will write 0 to the processor's
+Energy-Performance Preference (EPP) knob (if supported) or its
+Energy-Performance Bias (EPB) knob (otherwise), which means that the processor's
+internal P-state selection logic is expected to focus entirely on performance.
+
+This will override the EPP/EPB setting coming from the ``sysfs`` interface
+(see `Energy vs Performance Hints`_ below).
+
+Also, in this configuration the range of P-states available to the processor's
+internal P-state selection logic is always restricted to the upper boundary
+(that is, the maximum P-state that the driver is allowed to use).
+
+HWP + ``powersave``
+...................
+
+In this configuration ``intel_pstate`` will set the processor's
+Energy-Performance Preference (EPP) knob (if supported) or its
+Energy-Performance Bias (EPB) knob (otherwise) to whatever value it was
+previously set to via ``sysfs`` (or whatever default value it was
+set to by the platform firmware).  This usually causes the processor's
+internal P-state selection logic to be less performance-focused.
+
+Active Mode Without HWP
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This is the default operation mode for processors that do not support the HWP
+feature.  It also is used by default with the ``intel_pstate=no_hwp`` argument
+in the kernel command line.  However, in this mode ``intel_pstate`` may refuse
+to work with the given processor if it does not recognize it.  [Note that
+``intel_pstate`` will never refuse to work with any processor with the HWP
+feature enabled.]
+
+In this mode ``intel_pstate`` registers utilization update callbacks with the
+CPU scheduler in order to run a P-state selection algorithm, either
+``powersave`` or ``performance``, depending on the ``scaling_governor`` policy
+setting in ``sysfs``.  The current CPU frequency information to be made
+available from the ``scaling_cur_freq`` policy attribute in ``sysfs`` is
+periodically updated by those utilization update callbacks too.
+
+``performance``
+...............
+
+Without HWP, this P-state selection algorithm is always the same regardless of
+the processor model and platform configuration.
+
+It selects the maximum P-state it is allowed to use, subject to limits set via
+``sysfs``, every time the P-state selection computations are carried out by the
+driver's utilization update callback for the given CPU (that does not happen
+more often than every 10 ms), but the hardware configuration will not be changed
+if the new P-state is the same as the current one.
+
+This is the default P-state selection algorithm if the
+:c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option
+is set.
+
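+In rough sketch form (illustrative only, with made-up P-state numbers,
+not the driver's actual code), the behavior described above amounts to::
+
+   # Sketch: always aim for the highest P-state permitted by the sysfs
+   # limits, and only touch the hardware when the target changes.
+   max_allowed = 28   # made-up P-state limit derived from sysfs settings
+   current = 16       # made-up current P-state
+   target = max_allowed
+   if target != current:  # skip the redundant hardware write otherwise
+       current = target   # stands in for the actual register update
+   print(current)  # -> 28
+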
+``powersave``
+.............
+
+Without HWP, this P-state selection algorithm generally depends on the
+processor model and/or the system profile setting in the ACPI tables and there
+are two variants of it.
+
+One of them is used with processors from the Atom line and (regardless of the
+processor model) on platforms with the system profile in the ACPI tables set to
+"mobile" (laptops mostly), "tablet", "appliance PC", "desktop", or
+"workstation".  It is also used with processors supporting the HWP feature if
+that feature has not been enabled (that is, with the ``intel_pstate=no_hwp``
+argument in the kernel command line).  It is similar to the algorithm
+implemented by the generic ``schedutil`` scaling governor except that the
+utilization metric used by it is based on numbers coming from feedback
+registers of the CPU.  It generally selects P-states proportional to the
+current CPU utilization, so it is referred to as the "proportional" algorithm.
+
+The second variant of the ``powersave`` P-state selection algorithm, used in all
+of the other cases (generally, on processors from the Core line, so it is
+referred to as the "Core" algorithm), is based on the values read from the APERF
+and MPERF feedback registers and the previously requested target P-state.
+It does not really take CPU utilization into account explicitly, but as a rule
+it causes the CPU P-state to ramp up very quickly in response to increased
+utilization which is generally desirable in server environments.
+
+Regardless of the variant, this algorithm is run by the driver's utilization
+update callback for the given CPU when it is invoked by the CPU scheduler, but
+not more often than every 10 ms (that can be tweaked via ``debugfs`` in `this
+particular case <Tuning Interface in debugfs_>`_).  Like in the ``performance``
+case, the hardware configuration is not touched if the new P-state turns out to
+be the same as the current one.
+
+This is the default P-state selection algorithm if the
+:c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option
+is not set.
+
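+The "proportional" variant can be sketched as follows (an illustration
+with made-up numbers, not the driver's actual arithmetic)::
+
+   # Sketch: scale the target P-state linearly with the measured
+   # utilization (0.0 - 1.0), within the allowed [min_p, max_p] range.
+   def proportional_pstate(util, min_p, max_p):
+       return min_p + round(util * (max_p - min_p))
+
+   print(proportional_pstate(0.5, min_p=8, max_p=28))  # -> 18
+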
+Passive Mode
+------------
+
+This mode is used if the ``intel_pstate=passive`` argument is passed to the
+kernel in the command line (it implies the ``intel_pstate=no_hwp`` setting too).
+Like in the active mode without HWP support, in this mode ``intel_pstate`` may
+refuse to work with the given processor if it does not recognize it.
+
+If the driver works in this mode, the ``scaling_driver`` policy attribute in
+``sysfs`` for all ``CPUFreq`` policies contains the string "intel_cpufreq".
+Then, the driver behaves like a regular ``CPUFreq`` scaling driver.  That is,
+it is invoked by generic scaling governors when necessary to talk to the
+hardware in order to change the P-state of a CPU (in particular, the
+``schedutil`` governor can invoke it directly from scheduler context).
+
+While in this mode, ``intel_pstate`` can be used with all of the (generic)
+scaling governors listed by the ``scaling_available_governors`` policy attribute
+in ``sysfs`` (and the P-state selection algorithms described above are not
+used).  Then, it is responsible for the configuration of policy objects
+corresponding to CPUs and provides the ``CPUFreq`` core (and the scaling
+governors attached to the policy objects) with accurate information on the
+maximum and minimum operating frequencies supported by the hardware (including
+the so-called "turbo" frequency ranges).  In other words, in the passive mode
+the entire range of available P-states is exposed by ``intel_pstate`` to the
+``CPUFreq`` core.  However, in this mode the driver does not register
+utilization update callbacks with the CPU scheduler and the ``scaling_cur_freq``
+information comes from the ``CPUFreq`` core (and is the last frequency selected
+by the current scaling governor for the given policy).
+
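+Whether the passive mode is in use can be checked from user space by
+reading the ``scaling_driver`` policy attribute, for example (a sketch
+reading the standard ``CPUFreq`` ``sysfs`` path for CPU0)::
+
+   # Sketch: prints "intel_cpufreq" in the passive mode and
+   # "intel_pstate" in the active mode (on systems using this driver).
+   path = "/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver"
+   with open(path) as f:
+       print(f.read().strip())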
+
+.. _turbo:
+
+Turbo P-states Support
+======================
+
+In the majority of cases, the entire range of P-states available to
+``intel_pstate`` can be divided into two sub-ranges that correspond to
+different types of processor behavior, above and below a boundary that
+will be referred to as the "turbo threshold" in what follows.
+
+The P-states above the turbo threshold are referred to as "turbo P-states" and
+the whole sub-range of P-states they belong to is referred to as the "turbo
+range".  These names are related to the Turbo Boost technology allowing a
+multicore processor to opportunistically increase the P-state of one or more
+cores if there is enough power to do that and if that is not going to cause the
+thermal envelope of the processor package to be exceeded.
+
+Specifically, if software sets the P-state of a CPU core within the turbo range
+(that is, above the turbo threshold), the processor is permitted to take over
+performance scaling control for that core and put it into turbo P-states of its
+choice going forward.  However, that permission is interpreted differently by
+different processor generations.  Namely, the Sandy Bridge generation of
+processors will never use any P-states above the last one set by software for
+the given core, even if it is within the turbo range, whereas all of the later
+processor generations will take it as a license to use any P-states from the
+turbo range, even above the one set by software.  In other words, on those
+processors setting any P-state from the turbo range will enable the processor
+to put the given core into all turbo P-states up to and including the maximum
+supported one as it sees fit.
+
+One important property of turbo P-states is that they are not sustainable.  More
+precisely, there is no guarantee that any CPUs will be able to stay in any of
+those states indefinitely, because the power distribution within the processor
+package may change over time or the thermal envelope it was designed for might
+be exceeded if a turbo P-state was used for too long.
+
+In turn, the P-states below the turbo threshold generally are sustainable.  In
+fact, if one of them is set by software, the processor is not expected to change
+it to a lower one unless in a thermal stress or a power limit violation
+situation (a higher P-state may still be used if it is set for another CPU in
+the same package at the same time, for example).
+
+Some processors allow multiple cores to be in turbo P-states at the same time,
+but the maximum P-state that can be set for them generally depends on the number
+of cores running concurrently.  The maximum turbo P-state that can be set for 3
+cores at the same time usually is lower than the analogous maximum P-state for
+2 cores, which in turn usually is lower than the maximum turbo P-state that can
+be set for 1 core.  The one-core maximum turbo P-state is thus the maximum
+supported one overall.
+
+The maximum supported turbo P-state, the turbo threshold (the maximum supported
+non-turbo P-state) and the minimum supported P-state are specific to the
+processor model and can be determined by reading the processor's model-specific
+registers (MSRs).  Moreover, some processors support the Configurable TDP
+(Thermal Design Power) feature and, when that feature is enabled, the turbo
+threshold effectively becomes a configurable value that can be set by the
+platform firmware.
+
+Unlike ``_PSS`` objects in the ACPI tables, ``intel_pstate`` always exposes
+the entire range of available P-states, including the whole turbo range, to the
+``CPUFreq`` core and (in the passive mode) to generic scaling governors.  This
+generally causes turbo P-states to be set more often when ``intel_pstate`` is
+used relative to ACPI-based CPU performance scaling (see `below <acpi-cpufreq_>`_
+for more information).
+
+Moreover, since ``intel_pstate`` always knows what the real turbo threshold is
+(even if the Configurable TDP feature is enabled in the processor), its
+``no_turbo`` attribute in ``sysfs`` (described `below <no_turbo_attr_>`_) should
+work as expected in all cases (that is, if set to disable turbo P-states, it
+always should prevent ``intel_pstate`` from using them).
+
+
+Processor Support
+=================
+
+To handle a given processor ``intel_pstate`` requires a number of different
+pieces of information on it to be known, including:
+
+ * The minimum supported P-state.
+
+ * The maximum supported `non-turbo P-state <turbo_>`_.
+
+ * Whether or not turbo P-states are supported at all.
+
+ * The maximum supported `one-core turbo P-state <turbo_>`_ (if turbo P-states
+   are supported).
+
+ * The scaling formula to translate the driver's internal representation
+   of P-states into frequencies and the other way around.
+
+Generally, ways to obtain that information are specific to the processor model
+or family.  Although it often is possible to obtain all of it from the processor
+itself (using model-specific registers), there are cases in which hardware
+manuals need to be consulted to get to it too.
+
+For this reason, there is a list of supported processors in ``intel_pstate`` and
+the driver initialization will fail if the detected processor is not in that
+list, unless it supports the `HWP feature <Active Mode_>`_.  [The interface to
+obtain all of the information listed above is the same for all of the processors
+supporting the HWP feature, which is why they all are supported by
+``intel_pstate``.]
+
+
+User Space Interface in ``sysfs``
+=================================
+
+Global Attributes
+-----------------
+
+``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to
+control its functionality at the system level.  They are located in the
+``/sys/devices/system/cpu/cpufreq/intel_pstate/`` directory and affect all
+CPUs.
+
+Some of them are not present if the ``intel_pstate=per_cpu_perf_limits``
+argument is passed to the kernel in the command line.
+
+``max_perf_pct``
+       Maximum P-state the driver is allowed to set in percent of the
+       maximum supported performance level (the highest supported `turbo
+       P-state <turbo_>`_).
+
+       This attribute will not be exposed if the
+       ``intel_pstate=per_cpu_perf_limits`` argument is present in the kernel
+       command line.
+
+``min_perf_pct``
+       Minimum P-state the driver is allowed to set in percent of the
+       maximum supported performance level (the highest supported `turbo
+       P-state <turbo_>`_).
+
+       This attribute will not be exposed if the
+       ``intel_pstate=per_cpu_perf_limits`` argument is present in the kernel
+       command line.
+
+``num_pstates``
+       Number of P-states supported by the processor (between 0 and 255
+       inclusive) including both turbo and non-turbo P-states (see
+       `Turbo P-states Support`_).
+
+       The value of this attribute is not affected by the ``no_turbo``
+       setting described `below <no_turbo_attr_>`_.
+
+       This attribute is read-only.
+
+``turbo_pct``
+       Ratio of the `turbo range <turbo_>`_ size to the size of the entire
+       range of supported P-states, in percent.
+
+       This attribute is read-only.
+
+.. _no_turbo_attr:
+
+``no_turbo``
+       If set (equal to 1), the driver is not allowed to set any turbo P-states
+       (see `Turbo P-states Support`_).  If unset (equal to 0, which is the
+       default), turbo P-states can be set by the driver.
+       [Note that ``intel_pstate`` does not support the general ``boost``
+       attribute (supported by some other scaling drivers) which is replaced
+       by this one.]
+
+       This attribute does not affect the maximum supported frequency value
+       supplied to the ``CPUFreq`` core and exposed via the policy interface,
+       but it affects the maximum possible value of per-policy P-state limits
+       (see `Interpretation of Policy Attributes`_ below for details).
+
+.. _status_attr:
+
+``status``
+       Operation mode of the driver: "active", "passive" or "off".
+
+       "active"
+               The driver is functional and in the `active mode
+               <Active Mode_>`_.
+
+       "passive"
+               The driver is functional and in the `passive mode
+               <Passive Mode_>`_.
+
+       "off"
+               The driver is not functional (it is not registered as a scaling
+               driver with the ``CPUFreq`` core).
+
+       This attribute can be written to in order to change the driver's
+       operation mode or to unregister it.  The string written to it must be
+       one of the possible values of it and, if successful, the write will
+       cause the driver to switch over to the operation mode represented by
+       that string - or to be unregistered in the "off" case.  [Actually,
+       switching over from the active mode to the passive mode or the other
+       way around causes the driver to be unregistered and registered again
+       with a different set of callbacks, so all of its settings (the global
+       as well as the per-policy ones) are then reset to their default
+       values, possibly depending on the target operation mode.]
+
+       That only is supported in some configurations, though (for example, if
+       the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
+       the operation mode of the driver cannot be changed), and if it is not
+       supported in the current configuration, writes to this attribute will
+       fail with an appropriate error.
+
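+For illustration, the global attributes described above can be inspected
+from user space like this (a sketch that only reads the files listed in
+this section)::
+
+   # Sketch: dump all intel_pstate global attributes, e.g. "status",
+   # "no_turbo", "num_pstates", "turbo_pct" and the *_perf_pct limits.
+   import os
+
+   base = "/sys/devices/system/cpu/cpufreq/intel_pstate"
+   for name in sorted(os.listdir(base)):
+       with open(os.path.join(base, name)) as f:
+           print(name, "=", f.read().strip())
+
+Switching the operation mode then amounts to writing (as root) one of the
+strings listed above, such as "passive", to the ``status`` attribute.
+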
+Interpretation of Policy Attributes
+-----------------------------------
+
+The interpretation of some ``CPUFreq`` policy attributes described in
+:doc:`cpufreq` is special with ``intel_pstate`` as the current scaling driver
+and it generally depends on the driver's `operation mode <Operation Modes_>`_.
+
+First of all, the values of the ``cpuinfo_max_freq``, ``cpuinfo_min_freq`` and
+``scaling_cur_freq`` attributes are produced by applying a processor-specific
+multiplier to the internal P-state representation used by ``intel_pstate``.
+Also, the values of the ``scaling_max_freq`` and ``scaling_min_freq``
+attributes are capped by the frequency corresponding to the maximum P-state that
+the driver is allowed to set.
+
+If the ``no_turbo`` `global attribute <no_turbo_attr_>`_ is set, the driver is
+not allowed to use turbo P-states, so the maximum value of ``scaling_max_freq``
+and ``scaling_min_freq`` is limited to the maximum non-turbo P-state frequency.
+Accordingly, setting ``no_turbo`` causes ``scaling_max_freq`` and
+``scaling_min_freq`` to go down to that value if they were above it before.
+However, the old values of ``scaling_max_freq`` and ``scaling_min_freq`` will be
+restored after unsetting ``no_turbo``, unless these attributes have been written
+to after ``no_turbo`` was set.
+
+If ``no_turbo`` is not set, the maximum possible value of ``scaling_max_freq``
+and ``scaling_min_freq`` corresponds to the maximum supported turbo P-state,
+which also is the value of ``cpuinfo_max_freq`` in either case.
+
+Next, the following policy attributes have special meaning if
+``intel_pstate`` works in the `active mode <Active Mode_>`_:
+
+``scaling_available_governors``
+       List of P-state selection algorithms provided by ``intel_pstate``.
+
+``scaling_governor``
+       P-state selection algorithm provided by ``intel_pstate`` currently in
+       use with the given policy.
+
+``scaling_cur_freq``
+       Frequency of the average P-state of the CPU represented by the given
+       policy for the time interval between the last two invocations of the
+       driver's utilization update callback by the CPU scheduler for that CPU.
+
+The meaning of these attributes in the `passive mode <Passive Mode_>`_ is the
+same as for other scaling drivers.
+
+Additionally, the value of the ``scaling_driver`` attribute for ``intel_pstate``
+depends on the operation mode of the driver.  Namely, it is either
+"intel_pstate" (in the `active mode <Active Mode_>`_) or "intel_cpufreq" (in the
+`passive mode <Passive Mode_>`_).
+
+Coordination of P-State Limits
+------------------------------
+
+``intel_pstate`` allows P-state limits to be set in two ways: with the help of
+the ``max_perf_pct`` and ``min_perf_pct`` `global attributes
+<Global Attributes_>`_ or via the ``scaling_max_freq`` and ``scaling_min_freq``
+``CPUFreq`` policy attributes.  The coordination between those limits is based
+on the following rules, regardless of the current operation mode of the driver:
+
+ 1. All CPUs are affected by the global limits (that is, none of them can be
+    requested to run faster than the global maximum and none of them can be
+    requested to run slower than the global minimum).
+
+ 2. Each individual CPU is affected by its own per-policy limits (that is, it
+    cannot be requested to run faster than its own per-policy maximum and it
+    cannot be requested to run slower than its own per-policy minimum).
+
+ 3. The global and per-policy limits can be set independently.
+
+If the `HWP feature is enabled in the processor <Active Mode With HWP_>`_, the
+resulting effective values are written into its registers whenever the limits
+change in order to request its internal P-state selection logic to always set
+P-states within these limits.  Otherwise, the limits are taken into account by
+scaling governors (in the `passive mode <Passive Mode_>`_) and by the driver
+every time before setting a new P-state for a CPU.
+
+Additionally, if the ``intel_pstate=per_cpu_perf_limits`` command line argument
+is passed to the kernel, ``max_perf_pct`` and ``min_perf_pct`` are not exposed
+at all and the only way to set the limits is by using the policy attributes.
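+
+As a minimal illustration (the numeric values below are arbitrary examples,
+not recommendations), the global limits can be set via the `global attributes
+<Global Attributes_>`_ and the per-policy ones via the ``CPUFreq`` policy
+attributes in ``sysfs``::
+
+ # echo 90 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
+ # echo 20 > /sys/devices/system/cpu/intel_pstate/min_perf_pct
+ # echo 2400000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
+ # echo 800000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
+
+Each CPU then runs within the most restrictive combination of the global
+limits and its own per-policy ones, in accordance with the rules above.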
+
+
+Energy vs Performance Hints
+---------------------------
+
+If ``intel_pstate`` works in the `active mode with the HWP feature enabled
+<Active Mode With HWP_>`_ in the processor, additional attributes are present
+in every ``CPUFreq`` policy directory in ``sysfs``.  They are intended to allow
+user space to help ``intel_pstate`` to adjust the processor's internal P-state
+selection logic by focusing it on performance or on energy-efficiency, or
+somewhere between the two extremes:
+
+``energy_performance_preference``
+       Current value of the energy vs performance hint for the given policy
+       (or the CPU represented by it).
+
+       The hint can be changed by writing to this attribute.
+
+``energy_performance_available_preferences``
+       List of strings that can be written to the
+       ``energy_performance_preference`` attribute.
+
+       They represent different energy vs performance hints and should be
+       self-explanatory, except that ``default`` represents whatever hint
+       value was set by the platform firmware.
+
+Strings written to the ``energy_performance_preference`` attribute are
+internally translated to integer values written to the processor's
+Energy-Performance Preference (EPP) knob (if supported) or its
+Energy-Performance Bias (EPB) knob.
+
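+For illustration, the available hint strings can be listed and the current
+hint changed from the shell as follows (the set of strings shown here is an
+example and depends on the processor)::
+
+ # cd /sys/devices/system/cpu/cpu0/cpufreq
+ # cat energy_performance_available_preferences
+ default performance balance_performance balance_power power
+ # echo balance_power > energy_performance_preference
+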
+[Note that tasks may be migrated from one CPU to another by the scheduler's
+load-balancing algorithm and if different energy vs performance hints are
+set for those CPUs, that may lead to undesirable outcomes.  To avoid such
+issues it is better to set the same energy vs performance hint for all CPUs
+or to pin every task potentially sensitive to them to a specific CPU.]
+
+.. _acpi-cpufreq:
+
+``intel_pstate`` vs ``acpi-cpufreq``
+====================================
+
+On the majority of systems supported by ``intel_pstate``, the ACPI tables
+provided by the platform firmware contain ``_PSS`` objects returning information
+that can be used for CPU performance scaling (refer to the `ACPI specification`_
+for details on the ``_PSS`` objects and the format of the information returned
+by them).
+
+The information returned by the ACPI ``_PSS`` objects is used by the
+``acpi-cpufreq`` scaling driver.  On systems supported by ``intel_pstate``,
+the ``acpi-cpufreq`` driver uses the same hardware CPU performance scaling
+interface, but the set of P-states it can use is limited by the ``_PSS``
+output.
+
+On those systems each ``_PSS`` object returns a list of P-states supported by
+the corresponding CPU, which essentially is a subset of the range of P-states
+that can be used by ``intel_pstate`` on the same system, with one exception:
+the whole `turbo range <turbo_>`_ is represented by one item in it (the
+topmost one).  By
+convention, the frequency returned by ``_PSS`` for that item is greater by 1 MHz
+than the frequency of the highest non-turbo P-state listed by it, but the
+corresponding P-state representation (following the hardware specification)
+returned for it matches the maximum supported turbo P-state (or is the
+special value 255 meaning essentially "go as high as you can get").
+
+The list of P-states returned by ``_PSS`` is reflected by the table of
+available frequencies supplied by ``acpi-cpufreq`` to the ``CPUFreq`` core and
+scaling governors, and the minimum and maximum supported frequencies reported
+by it come from that list as well.  In particular, given the special
+representation of the turbo range described above, this means that the maximum
+supported
+frequency reported by ``acpi-cpufreq`` is higher by 1 MHz than the frequency
+of the highest supported non-turbo P-state listed by ``_PSS`` which, of course,
+affects decisions made by the scaling governors, except for ``powersave`` and
+``performance``.
+
+For example, if a given governor attempts to select a frequency proportional to
+estimated CPU load and maps the load of 100% to the maximum supported frequency
+(possibly multiplied by a constant), then it will tend to choose P-states below
+the turbo threshold if ``acpi-cpufreq`` is used as the scaling driver, because
+in that case the turbo range corresponds to a small fraction of the frequency
+band it can use (1 MHz vs 1 GHz or more).  In consequence, it will only go to
+the turbo range for the highest loads, while other loads above 50% that might
+benefit from running at turbo frequencies will be given non-turbo P-states
+instead.
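+
+As a purely hypothetical numeric illustration, if the highest non-turbo
+P-state of a CPU corresponds to 2.9 GHz and the maximum turbo P-state to
+3.9 GHz, ``acpi-cpufreq`` reports 2901 MHz as the maximum supported frequency,
+so a governor mapping 100% load to that value will request a frequency in the
+turbo range only for loads above roughly 99.97%, although any load above about
+74% (2900/3900) might benefit from turbo frequencies.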
+
+One more issue related to that may appear on systems supporting the
+`Configurable TDP feature <turbo_>`_ allowing the platform firmware to set the
+turbo threshold.  Namely, if that is not coordinated with the lists of P-states
+returned by ``_PSS`` properly, there may be more than one item corresponding to
+a turbo P-state in those lists and there may be a problem with avoiding the
+turbo range (if desirable or necessary).  Usually, to avoid using turbo
+P-states overall, ``acpi-cpufreq`` simply avoids using the topmost state listed
+by ``_PSS``, but that is not sufficient when there are other turbo P-states in
+the list returned by it.
+
+Apart from the above, ``acpi-cpufreq`` works like ``intel_pstate`` in the
+`passive mode <Passive Mode_>`_, except that the number of P-states it can set
+is limited to the ones listed by the ACPI ``_PSS`` objects.
+
+
+Kernel Command Line Options for ``intel_pstate``
+================================================
+
+Several kernel command line options can be used to pass early-configuration-time
+parameters to ``intel_pstate`` in order to enforce a specific behavior of the
+driver.  All of them have to be prepended with the ``intel_pstate=`` prefix.
+
+``disable``
+       Do not register ``intel_pstate`` as the scaling driver even if the
+       processor is supported by it.
+
+``passive``
+       Register ``intel_pstate`` in the `passive mode <Passive Mode_>`_ to
+       start with.
+
+       This option implies the ``no_hwp`` one described below.
+
+``force``
+       Register ``intel_pstate`` as the scaling driver instead of
+       ``acpi-cpufreq`` even if the latter is preferred on the given system.
+
+       This may prevent some platform features (such as thermal controls and
+       power capping) that rely on the availability of ACPI P-states
+       information from functioning as expected, so it should be used with
+       caution.
+
+       This option does not work with processors that are not supported by
+       ``intel_pstate`` and on platforms where the ``pcc-cpufreq`` scaling
+       driver is used instead of ``acpi-cpufreq``.
+
+``no_hwp``
+       Do not enable the `hardware-managed P-states (HWP) feature
+       <Active Mode With HWP_>`_ even if it is supported by the processor.
+
+``hwp_only``
+       Register ``intel_pstate`` as the scaling driver only if the
+       `hardware-managed P-states (HWP) feature <Active Mode With HWP_>`_ is
+       supported by the processor.
+
+``support_acpi_ppc``
+       Take ACPI ``_PPC`` performance limits into account.
+
+       If the preferred power management profile in the FADT (Fixed ACPI
+       Description Table) is set to "Enterprise Server" or "Performance
+       Server", the ACPI ``_PPC`` limits are taken into account by default
+       and this option has no effect.
+
+``per_cpu_perf_limits``
+       Use per-logical-CPU P-state limits (see `Coordination of P-State
+       Limits`_ for details).
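+
+For example, appending the following to the kernel command line makes the
+driver start in the `passive mode <Passive Mode_>`_ (which, as noted above,
+also implies ``no_hwp``)::
+
+ intel_pstate=passive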
+
+
+Diagnostics and Tuning
+======================
+
+Trace Events
+------------
+
+There are two static trace events that can be used for ``intel_pstate``
+diagnostics.  One of them is the ``cpu_frequency`` trace event generally used
+by ``CPUFreq``, and the other one is the ``pstate_sample`` trace event specific
+to ``intel_pstate``.  Both of them are triggered by ``intel_pstate`` only if
+it works in the `active mode <Active Mode_>`_.
+
+The following sequence of shell commands can be used to enable them and see
+their output (if the kernel is generally configured to support event tracing)::
+
+ # cd /sys/kernel/debug/tracing/
+ # echo 1 > events/power/pstate_sample/enable
+ # echo 1 > events/power/cpu_frequency/enable
+ # cat trace
+ gnome-terminal--4510  [001] ..s.  1177.680733: pstate_sample: core_busy=107 scaled=94 from=26 to=26 mperf=1143818 aperf=1230607 tsc=29838618 freq=2474476
+ cat-5235  [002] ..s.  1177.681723: cpu_frequency: state=2900000 cpu_id=2
+
+If ``intel_pstate`` works in the `passive mode <Passive Mode_>`_, the
+``cpu_frequency`` trace event will be triggered either by the ``schedutil``
+scaling governor (for the policies it is attached to), or by the ``CPUFreq``
+core (for the policies with other scaling governors).
+
+``ftrace``
+----------
+
+The ``ftrace`` interface can be used for low-level diagnostics of
+``intel_pstate``.  For example, to check how often the function to set a
+P-state is called, the ``ftrace`` filter can be set to
+:c:func:`intel_pstate_set_pstate`::
+
+ # cd /sys/kernel/debug/tracing/
+ # cat available_filter_functions | grep -i pstate
+ intel_pstate_set_pstate
+ intel_pstate_cpu_init
+ ...
+ # echo intel_pstate_set_pstate > set_ftrace_filter
+ # echo function > current_tracer
+ # cat trace | head -15
+ # tracer: function
+ #
+ # entries-in-buffer/entries-written: 80/80   #P:4
+ #
+ #                              _-----=> irqs-off
+ #                             / _----=> need-resched
+ #                            | / _---=> hardirq/softirq
+ #                            || / _--=> preempt-depth
+ #                            ||| /     delay
+ #           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+ #              | |       |   ||||       |         |
+             Xorg-3129  [000] ..s.  2537.644844: intel_pstate_set_pstate <-intel_pstate_timer_func
+  gnome-terminal--4510  [002] ..s.  2537.649844: intel_pstate_set_pstate <-intel_pstate_timer_func
+      gnome-shell-3409  [001] ..s.  2537.650850: intel_pstate_set_pstate <-intel_pstate_timer_func
+           <idle>-0     [000] ..s.  2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func
+
+Tuning Interface in ``debugfs``
+-------------------------------
+
+The ``powersave`` algorithm provided by ``intel_pstate`` for `the Core line of
+processors in the active mode <powersave_>`_ is based on a `PID controller`_
+whose parameters were chosen to address a number of different use cases at the
+same time.  However, it still is possible to fine-tune it to a specific workload
+and the ``debugfs`` interface under ``/sys/kernel/debug/pstate_snb/`` is
+provided for this purpose.  [Note that the ``pstate_snb`` directory will be
+present only if the P-state selection algorithm matching this interface is
+actually in use.]
+
+The following files present in that directory can be used to modify the PID
+controller parameters at run time:
+
+| ``deadband``
+| ``d_gain_pct``
+| ``i_gain_pct``
+| ``p_gain_pct``
+| ``sample_rate_ms``
+| ``setpoint``
+
+Note, however, that achieving desirable results this way generally requires
+expert-level understanding of the power vs performance tradeoff, so extra care
+is recommended when attempting to do that.
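+
+For illustration only: with zero integral and derivative gains, the algorithm
+requests approximately the current P-state minus ``(setpoint - load) *
+p_gain_pct / 100``, so lowering the ``setpoint`` (97 by default for these
+processors) makes the driver ramp up more aggressively for the same load::
+
+ # cd /sys/kernel/debug/pstate_snb/
+ # cat setpoint
+ 97
+ # echo 60 > setpoint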
+
+
+.. _LCEU2015: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
+.. _SDM: http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
+.. _ACPI specification: http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf
+.. _PID controller: https://en.wikipedia.org/wiki/PID_controller
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
deleted file mode 100644 (file)
index 3fdcdfd..0000000
+++ /dev/null
@@ -1,281 +0,0 @@
-Intel P-State driver
---------------------
-
-This driver provides an interface to control the P-State selection for the
-SandyBridge+ Intel processors.
-
-The following document explains P-States:
-http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
-As stated in the document, P-State doesn’t exactly mean a frequency. However, for
-the sake of the relationship with cpufreq, P-State and frequency are used
-interchangeably.
-
-Understanding the cpufreq core governors and policies are important before
-discussing more details about the Intel P-State driver. Based on what callbacks
-a cpufreq driver provides to the cpufreq core, it can support two types of
-drivers:
-- with target_index() callback: In this mode, the drivers using cpufreq core
-simply provide the minimum and maximum frequency limits and an additional
-interface target_index() to set the current frequency. The cpufreq subsystem
-has a number of scaling governors ("performance", "powersave", "ondemand",
-etc.). Depending on which governor is in use, cpufreq core will call for
-transitions to a specific frequency using target_index() callback.
-- setpolicy() callback: In this mode, drivers do not provide target_index()
-callback, so cpufreq core can't request a transition to a specific frequency.
-The driver provides minimum and maximum frequency limits and callbacks to set a
-policy. The policy in cpufreq sysfs is referred to as the "scaling governor".
-The cpufreq core can request the driver to operate in any of the two policies:
-"performance" and "powersave". The driver decides which frequency to use based
-on the above policy selection considering minimum and maximum frequency limits.
-
-The Intel P-State driver falls under the latter category, which implements the
-setpolicy() callback. This driver decides what P-State to use based on the
-requested policy from the cpufreq core. If the processor is capable of
-selecting its next P-State internally, then the driver will offload this
-responsibility to the processor (aka HWP: Hardware P-States). If not, the
-driver implements algorithms to select the next P-State.
-
-Since these policies are implemented in the driver, they are not same as the
-cpufreq scaling governors implementation, even if they have the same name in
-the cpufreq sysfs (scaling_governors). For example the "performance" policy is
-similar to cpufreq’s "performance" governor, but "powersave" is completely
-different than the cpufreq "powersave" governor. The strategy here is similar
-to cpufreq "ondemand", where the requested P-State is related to the system load.
-
-Sysfs Interface
-
-In addition to the frequency-controlling interfaces provided by the cpufreq
-core, the driver provides its own sysfs files to control the P-State selection.
-These files have been added to /sys/devices/system/cpu/intel_pstate/.
-Any changes made to these files are applicable to all CPUs (even in a
-multi-package system, Refer to later section on placing "Per-CPU limits").
-
-      max_perf_pct: Limits the maximum P-State that will be requested by
-      the driver. It states it as a percentage of the available performance. The
-      available (P-State) performance may be reduced by the no_turbo
-      setting described below.
-
-      min_perf_pct: Limits the minimum P-State that will be requested by
-      the driver. It states it as a percentage of the max (non-turbo)
-      performance level.
-
-      no_turbo: Limits the driver to selecting P-State below the turbo
-      frequency range.
-
-      turbo_pct: Displays the percentage of the total performance that
-      is supported by hardware that is in the turbo range. This number
-      is independent of whether turbo has been disabled or not.
-
-      num_pstates: Displays the number of P-States that are supported
-      by hardware. This number is independent of whether turbo has
-      been disabled or not.
-
-For example, if a system has these parameters:
-       Max 1 core turbo ratio: 0x21 (Max 1 core ratio is the maximum P-State)
-       Max non turbo ratio: 0x17
-       Minimum ratio : 0x08 (Here the ratio is called max efficiency ratio)
-
-Sysfs will show :
-       max_perf_pct:100, which corresponds to 1 core ratio
-       min_perf_pct:24, max_efficiency_ratio / max 1 Core ratio
-       no_turbo:0, turbo is not disabled
-       num_pstates:26 = (max 1 Core ratio - Max Efficiency Ratio + 1)
-       turbo_pct:39 = (max 1 core ratio - max non turbo ratio) / num_pstates
-
-Refer to "Intel® 64 and IA-32 Architectures Software Developer’s Manual
-Volume 3: System Programming Guide" to understand ratios.
-
-There is one more sysfs attribute in /sys/devices/system/cpu/intel_pstate/
-that can be used for controlling the operation mode of the driver:
-
-      status: Three settings are possible:
-      "off"     - The driver is not in use at this time.
-      "active"  - The driver works as a P-state governor (default).
-      "passive" - The driver works as a regular cpufreq one and collaborates
-                  with the generic cpufreq governors (it sets P-states as
-                  requested by those governors).
-      The current setting is returned by reads from this attribute.  Writing one
-      of the above strings to it changes the operation mode as indicated by that
-      string, if possible.  If HW-managed P-states (HWP) are enabled, it is not
-      possible to change the driver's operation mode and attempts to write to
-      this attribute will fail.
-
-cpufreq sysfs for Intel P-State
-
-Since this driver registers with cpufreq, cpufreq sysfs is also presented.
-There are some important differences, which need to be considered.
-
-scaling_cur_freq: This displays the real frequency which was used during
-the last sample period instead of what is requested. Some other cpufreq driver,
-like acpi-cpufreq, displays what is requested (Some changes are on the
-way to fix this for acpi-cpufreq driver). The same is true for frequencies
-displayed at /proc/cpuinfo.
-
-scaling_governor: This displays current active policy. Since each CPU has a
-cpufreq sysfs, it is possible to set a scaling governor to each CPU. But this
-is not possible with Intel P-States, as there is one common policy for all
-CPUs. Here, the last requested policy will be applicable to all CPUs. It is
-suggested that one use the cpupower utility to change policy to all CPUs at the
-same time.
-
-scaling_setspeed: This attribute can never be used with Intel P-State.
-
-scaling_max_freq/scaling_min_freq: This interface can be used similarly to
-the max_perf_pct/min_perf_pct of Intel P-State sysfs. However since frequencies
-are converted to nearest possible P-State, this is prone to rounding errors.
-This method is not preferred to limit performance.
-
-affected_cpus: Not used
-related_cpus: Not used
-
-For contemporary Intel processors, the frequency is controlled by the
-processor itself and the P-State exposed to software is related to
-performance levels.  The idea that frequency can be set to a single
-frequency is fictional for Intel Core processors. Even if the scaling
-driver selects a single P-State, the actual frequency the processor
-will run at is selected by the processor itself.
-
-Per-CPU limits
-
-The kernel command line option "intel_pstate=per_cpu_perf_limits" forces
-the intel_pstate driver to use per-CPU performance limits.  When it is set,
-the sysfs control interface described above is subject to limitations.
-- The following controls are not available for both read and write
-       /sys/devices/system/cpu/intel_pstate/max_perf_pct
-       /sys/devices/system/cpu/intel_pstate/min_perf_pct
-- The following controls can be used to set performance limits, as far as the
-architecture of the processor permits:
-       /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq
-       /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq
-       /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
-- User can still observe turbo percent and number of P-States from
-       /sys/devices/system/cpu/intel_pstate/turbo_pct
-       /sys/devices/system/cpu/intel_pstate/num_pstates
-- User can read write system wide turbo status
-       /sys/devices/system/cpu/no_turbo
-
-Support of energy performance hints
-It is possible to provide hints to the HWP algorithms in the processor
-to be more performance centric to more energy centric. When the driver
-is using HWP, two additional cpufreq sysfs attributes are presented for
-each logical CPU.
-These attributes are:
-       - energy_performance_available_preferences
-       - energy_performance_preference
-
-To get list of supported hints:
-$ cat energy_performance_available_preferences
-    default performance balance_performance balance_power power
-
-The current preference can be read or changed via cpufreq sysfs
-attribute "energy_performance_preference". Reading from this attribute
-will display current effective setting. User can write any of the valid
-preference string to this attribute. User can always restore to power-on
-default by writing "default".
-
-Since threads can migrate to different CPUs, this is possible that the
-new CPU may have different energy performance preference than the previous
-one. To avoid such issues, either threads can be pinned to specific CPUs
-or set the same energy performance preference value to all CPUs.
-
-Tuning Intel P-State driver
-
-When the performance can be tuned using PID (Proportional Integral
-Derivative) controller, debugfs files are provided for adjusting performance.
-They are presented under:
-/sys/kernel/debug/pstate_snb/
-
-The PID tunable parameters are:
-      deadband
-      d_gain_pct
-      i_gain_pct
-      p_gain_pct
-      sample_rate_ms
-      setpoint
-
-To adjust these parameters, some understanding of driver implementation is
-necessary. There are some tweeks described here, but be very careful. Adjusting
-them requires expert level understanding of power and performance relationship.
-These limits are only useful when the "powersave" policy is active.
-
--To make the system more responsive to load changes, sample_rate_ms can
-be adjusted  (current default is 10ms).
--To make the system use higher performance, even if the load is lower, setpoint
-can be adjusted to a lower number. This will also lead to faster ramp up time
-to reach the maximum P-State.
-If there are no derivative and integral coefficients, The next P-State will be
-equal to:
-       current P-State - ((setpoint - current cpu load) * p_gain_pct)
-
-For example, if the current PID parameters are (Which are defaults for the core
-processors like SandyBridge):
-      deadband = 0
-      d_gain_pct = 0
-      i_gain_pct = 0
-      p_gain_pct = 20
-      sample_rate_ms = 10
-      setpoint = 97
-
-If the current P-State = 0x08 and current load = 100, this will result in the
-next P-State = 0x08 - ((97 - 100) * 0.2) = 8.6 (rounded to 9). Here the P-State
-goes up by only 1. If during next sample interval the current load doesn't
-change and still 100, then P-State goes up by one again. This process will
-continue as long as the load is more than the setpoint until the maximum P-State
-is reached.
-
-For the same load at setpoint = 60, this will result in the next P-State
-= 0x08 - ((60 - 100) * 0.2) = 16
-So by changing the setpoint from 97 to 60, there is an increase of the
-next P-State from 9 to 16. So this will make processor execute at higher
-P-State for the same CPU load. If the load continues to be more than the
-setpoint during next sample intervals, then P-State will go up again till the
-maximum P-State is reached. But the ramp up time to reach the maximum P-State
-will be much faster when the setpoint is 60 compared to 97.
-
-Debugging Intel P-State driver
-
-Event tracing
-To debug P-State transition, the Linux event tracing interface can be used.
-There are two specific events, which can be enabled (Provided the kernel
-configs related to event tracing are enabled).
-
-# cd /sys/kernel/debug/tracing/
-# echo 1 > events/power/pstate_sample/enable
-# echo 1 > events/power/cpu_frequency/enable
-# cat trace
-gnome-terminal--4510  [001] ..s.  1177.680733: pstate_sample: core_busy=107
-       scaled=94 from=26 to=26 mperf=1143818 aperf=1230607 tsc=29838618
-               freq=2474476
-cat-5235  [002] ..s.  1177.681723: cpu_frequency: state=2900000 cpu_id=2
-
-
-Using ftrace
-
-If function level tracing is required, the Linux ftrace interface can be used.
-For example if we want to check how often a function to set a P-State is
-called, we can set ftrace filter to intel_pstate_set_pstate.
-
-# cd /sys/kernel/debug/tracing/
-# cat available_filter_functions | grep -i pstate
-intel_pstate_set_pstate
-intel_pstate_cpu_init
-...
-
-# echo intel_pstate_set_pstate > set_ftrace_filter
-# echo function > current_tracer
-# cat trace | head -15
-# tracer: function
-#
-# entries-in-buffer/entries-written: 80/80   #P:4
-#
-#                              _-----=> irqs-off
-#                             / _----=> need-resched
-#                            | / _---=> hardirq/softirq
-#                            || / _--=> preempt-depth
-#                            ||| /     delay
-#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
-#              | |       |   ||||       |         |
-            Xorg-3129  [000] ..s.  2537.644844: intel_pstate_set_pstate <-intel_pstate_timer_func
- gnome-terminal--4510  [002] ..s.  2537.649844: intel_pstate_set_pstate <-intel_pstate_timer_func
-     gnome-shell-3409  [001] ..s.  2537.650850: intel_pstate_set_pstate <-intel_pstate_timer_func
-          <idle>-0     [000] ..s.  2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func
index 6db22103e2dd535952143a2bae9ba4c6790c6e56..025cf8c9324ac362eeee56b5d4f9df09db466180 100644 (file)
@@ -36,7 +36,7 @@ Optional properties:
                 control gpios
 
  - threshold:   allows setting the "click"-threshold in the range
-                from 20 to 80.
+                from 0 to 80.
 
  - gain:        allows setting the sensitivity in the range from 0 to
                 31. Note that lower values indicate higher
index 05485699d70e7c85cb25eb83f39b86f5be0daaca..9630ac0e4b56ef372f08a0850175be06d13cd9a7 100644 (file)
@@ -16,6 +16,11 @@ Required properties:
 - reg:                  Base address of PMIC on Hi6220 SoC.
 - interrupt-controller: Hi655x has internal IRQs (has own IRQ domain).
 - pmic-gpios:           The GPIO used by PMIC IRQ.
+- #clock-cells:                From common clock binding; shall be set to 0
+
+Optional properties:
+- clock-output-names: From common clock binding to override the
+  default output clock name
 
 Example:
        pmic: pmic@f8000000 {
@@ -24,4 +29,5 @@ Example:
                interrupt-controller;
                #interrupt-cells = <2>;
                pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+               #clock-cells = <0>;
        }
index e25436861867f28f75a2450ecbf28d1fd2df3921..9029b45b8a22a8d5b6a1afa3184bf389a85202f4 100644 (file)
@@ -18,6 +18,8 @@ Optional properties:
   "ext_clock" (External clock provided to the card).
 - post-power-on-delay-ms : Delay in ms after powering the card and
        de-asserting the reset-gpios (if any)
+- power-off-delay-us : Delay in us after asserting the reset-gpios (if any)
+       during power off of the card.
 
 Example:
 
index 7ef9dbb08957a593528a4d873d1f3af29892f5c2..1d4d0f49c9d06eb66d9957fb0661cec35ddc7af9 100644 (file)
@@ -26,6 +26,10 @@ Optional properties:
 - interrupt-controller : Indicates the switch is itself an interrupt
                          controller. This is used for the PHY interrupts.
 #interrupt-cells = <2> : Controller uses two cells, number and flag
+- eeprom-length                : Set to the length of an EEPROM connected to the
+                         switch. Must be set if the switch can not detect
+                         the presence and/or size of a connected EEPROM,
+                         otherwise optional.
 - mdio                 : Container of PHY and devices on the switches MDIO
                          bus.
 - mdio?                : Container of PHYs and devices on the external MDIO
index a1e3693cca1601e47096dc2edc2a9580b8b0569a..6f55bdd52f8a99be3c0741cf6411a00554c85778 100644 (file)
@@ -15,6 +15,10 @@ Optional properties:
 - phy-reset-active-high : If present then the reset sequence using the GPIO
   specified in the "phy-reset-gpios" property is reversed (H=reset state,
   L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+  a delay of phy-reset-post-delay milliseconds will be observed after the
+  phy-reset-gpios has been toggled. Can be omitted thus no delay is
+  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
 - phy-supply : regulator that powers the Ethernet PHY.
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
index 71a3c134af1b25d58613699aa465d17cff2c23d0..f01d154090dab1b3a8a58fee6a599459392aa1f8 100644 (file)
@@ -247,7 +247,6 @@ bias-bus-hold               - latch weakly
 bias-pull-up           - pull up the pin
 bias-pull-down         - pull down the pin
 bias-pull-pin-default  - use pin-default pull state
-bi-directional         - pin supports simultaneous input/output operations
 drive-push-pull                - drive actively high and low
 drive-open-drain       - drive with open drain
 drive-open-source      - drive with open source
@@ -260,7 +259,6 @@ input-debounce              - debounce mode with debound time X
 power-source           - select between different power supplies
 low-power-enable       - enable low power mode
 low-power-disable      - disable low power mode
-output-enable          - enable output on pin regardless of output value
 output-low             - set the pin to output mode with low level
 output-high            - set the pin to output mode with high level
 slew-rate              - set the slew rate
index 2032f0b7a8fa125dc3c88842dbeccb25a41a3453..1ccc94b192b7edacd8a42ff81de923b4a35a27e7 100644 (file)
@@ -15,7 +15,7 @@ It has been tested with the following devices:
 The driver allows configuration of the touch screen via a set of sysfs files:
 
 /sys/class/input/eventX/device/device/threshold:
-    allows setting the "click"-threshold in the range from 20 to 80.
+    allows setting the "click"-threshold in the range from 0 to 80.
 
 /sys/class/input/eventX/device/device/gain:
     allows setting the sensitivity in the range from 0 to 31. Note that
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
new file mode 100644 (file)
index 0000000..76e016d
--- /dev/null
@@ -0,0 +1,194 @@
+The QorIQ DPAA Ethernet Driver
+==============================
+
+Authors:
+Madalin Bucur <madalin.bucur@nxp.com>
+Camelia Groza <camelia.groza@nxp.com>
+
+Contents
+========
+
+       - DPAA Ethernet Overview
+       - DPAA Ethernet Supported SoCs
+       - Configuring DPAA Ethernet in your kernel
+       - DPAA Ethernet Frame Processing
+       - DPAA Ethernet Features
+       - Debugging
+
+DPAA Ethernet Overview
+======================
+
+DPAA stands for Data Path Acceleration Architecture and it is a
+set of networking acceleration IPs that are available on several
+generations of SoCs, both on PowerPC and ARM64.
+
+The Freescale DPAA architecture consists of a series of hardware blocks
+that support Ethernet connectivity. The Ethernet driver depends upon the
+following drivers in the Linux kernel:
+
+ - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
+    drivers/iommu/fsl_*
+ - Frame Manager (FMan)
+    drivers/net/ethernet/freescale/fman
+ - Queue Manager (QMan), Buffer Manager (BMan)
+    drivers/soc/fsl/qbman
+
+A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
+
+  dpaa_eth       /eth0\     ...       /ethN\
+  driver        |      |             |      |
+  -------------   ----   -----------   ----   -------------
+       -Ports  / Tx  Rx \    ...    / Tx  Rx \
+  FMan        |          |         |          |
+       -MACs  |   MAC0   |         |   MACN   |
+             /   dtsec0   \  ...  /   dtsecN   \ (or tgec)
+            /              \     /              \(or memac)
+  ---------  --------------  ---  --------------  ---------
+      FMan, FMan Port, FMan SP, FMan MURAM drivers
+  ---------------------------------------------------------
+      FMan HW blocks: MURAM, MACs, Ports, SP
+  ---------------------------------------------------------
+
+The dpaa_eth relation to the QMan, BMan and FMan:
+              ________________________________
+  dpaa_eth   /            eth0                \
+  driver    /                                  \
+  ---------   -^-   -^-   -^-   ---    ---------
+  QMan driver / \   / \   / \  \   /  | BMan    |
+             |Rx | |Rx | |Tx | |Tx |  | driver  |
+  ---------  |Dfl| |Err| |Cnf| |FQs|  |         |
+  QMan HW    |FQ | |FQ | |FQs| |   |  |         |
+             /   \ /   \ /   \  \ /   |         |
+  ---------   ---   ---   ---   -v-    ---------
+            |        FMan QMI         |         |
+            | FMan HW       FMan BMI  | BMan HW |
+              -----------------------   --------
+
+where the acronyms used above (and in the code) are:
+DPAA = Data Path Acceleration Architecture
+FMan = DPAA Frame Manager
+QMan = DPAA Queue Manager
+BMan = DPAA Buffers Manager
+QMI = QMan interface in FMan
+BMI = BMan interface in FMan
+FMan SP = FMan Storage Profiles
+MURAM = Multi-user RAM in FMan
+FQ = QMan Frame Queue
+Rx Dfl FQ = default reception FQ
+Rx Err FQ = Rx error frames FQ
+Tx Cnf FQ = Tx confirmation FQs
+Tx FQs = transmission frame queues
+dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
+tgec = ten gigabit Ethernet controller (10 Gbps)
+memac = multirate Ethernet MAC (10/100/1000/10000)
+
+DPAA Ethernet Supported SoCs
+============================
+
+The DPAA drivers enable the Ethernet controllers present on the following SoCs:
+
+# PPC
+P1023
+P2041
+P3041
+P4080
+P5020
+P5040
+T1023
+T1024
+T1040
+T1042
+T2080
+T4240
+B4860
+
+# ARM
+LS1043A
+LS1046A
+
+Configuring DPAA Ethernet in your kernel
+========================================
+
+To enable the DPAA Ethernet driver, the following Kconfig options are required:
+
+# common for arch/arm64 and arch/powerpc platforms
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_XGMAC_MDIO=y
+
+# for arch/powerpc only
+CONFIG_FSL_PAMU=y
+
+# common options needed for the PHYs used on the RDBs
+CONFIG_VITESSE_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_AQUANTIA_PHY=y
+
+DPAA Ethernet Frame Processing
+==============================
+
+On Rx, buffers for the incoming frames are retrieved from one of the three
+existing buffer pools. The driver initializes and seeds these pools, each
+with buffers of a different size: 1KB, 2KB and 4KB.
+
+On Tx, all transmitted frames are returned to the driver through Tx
+confirmation frame queues. The driver is then responsible for freeing the
+buffers. In order to do this properly, a backpointer to the skb is added to
+the buffer before transmission. When the buffer returns to the driver on a
+confirmation FQ, the skb can be correctly consumed.
+
+DPAA Ethernet Features
+======================
+
+Currently the DPAA Ethernet driver enables the basic features required for
+a Linux Ethernet driver. The support for advanced features will be added
+gradually.
+
+The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
+checksum offload feature is enabled by default and cannot be controlled through
+ethtool.
+
+The driver has support for multiple prioritized Tx traffic classes. Priorities
+range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
+strict priority levels. Each traffic class contains one Tx queue per CPU. By
+default, only one traffic class is enabled and the lowest priority Tx queues
+are used. Higher priority traffic classes can be enabled with the mqprio
+qdisc. skb priority levels are mapped to traffic classes as follows:
+
+	* priorities 0 to 3 - traffic class 0 (low priority)
+	* priorities 4 to 7 - traffic class 1 (medium-low priority)
+	* priorities 8 to 11 - traffic class 2 (medium-high priority)
+	* priorities 12 to 15 - traffic class 3 (high priority)
+
+For example, all four traffic classes are enabled on an interface with the
+following command:
+
+tc qdisc add dev <int> root handle 1: \
+        mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
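+
+The resulting configuration can be inspected with the standard tc command:
+
+tc qdisc show dev <int>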
+
+Debugging
+=========
+
+The following statistics are exported for each interface through ethtool:
+
+	- interrupt count per CPU
+	- Rx packet count per CPU
+	- Tx packet count per CPU
+	- Tx confirmed packet count per CPU
+	- Tx S/G frame count per CPU
+	- Tx error count per CPU
+	- Rx error count per CPU
+	- Rx error count per type
+	- congestion related statistics:
+		- congestion status
+		- time spent in congestion
+		- number of times the device entered congestion
+		- dropped packet count per cause
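+
+These counters are available through the standard ethtool statistics
+interface:
+
+ethtool -S <int>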
+
+The driver also exports the following information in sysfs:
+
+       - the FQ IDs for each FQ type
+       /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+
+       - the IDs of the buffer pools in use
+       /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
index bdc4c0db51e1078fb002907124fe7008ef4c0cd4..9c7139d57e5748508ce20400349cfdefd0284f74 100644 (file)
@@ -1,7 +1,7 @@
 TCP protocol
 ============
 
-Last updated: 9 February 2008
+Last updated: 3 June 2017
 
 Contents
 ========
@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
 A congestion control mechanism can be registered through functions in
 tcp_cong.c. The functions used by the congestion control mechanism are
 registered via passing a tcp_congestion_ops struct to
-tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid must be valid.
+tcp_register_congestion_control. As a minimum, the congestion control
+mechanism must provide a valid name and must implement either ssthresh,
+cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
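+
+As a purely illustrative sketch (the module and function names below are
+made up for this example), a minimal mechanism reusing the exported reno
+helpers could be registered like this:
+
+	#include <linux/module.h>
+	#include <net/tcp.h>
+
+	static struct tcp_congestion_ops tcp_example __read_mostly = {
+		.name		= "example",
+		.owner		= THIS_MODULE,
+		/* reuse the reno helpers for the mandatory hooks */
+		.ssthresh	= tcp_reno_ssthresh,
+		.cong_avoid	= tcp_reno_cong_avoid,
+		.undo_cwnd	= tcp_reno_undo_cwnd,
+	};
+
+	static int __init tcp_example_register(void)
+	{
+		return tcp_register_congestion_control(&tcp_example);
+	}
+	module_init(tcp_example_register);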
 
 Private data for a congestion control mechanism is stored in tp->ca_priv.
 tcp_ca(tp) returns a pointer to this space.  This is preallocated space - it
 is important to check the size of your private data will fit this space, or
-alternatively space could be allocated elsewhere and a pointer to it could
+alternatively, space could be allocated elsewhere and a pointer to it could
 be stored here.
 
 There are three kinds of congestion control algorithms currently: The
 simplest ones are derived from TCP reno (highspeed, scalable) and just
-provide an alternative the congestion window calculation. More complex
+provide an alternative congestion window calculation. More complex
 ones like BIC try to look at other events to provide better
 heuristics.  There are also round trip time based algorithms like
 Vegas and Westwood+.
@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
 needs to maintain fairness and performance. Please review current
 research and RFC's before developing new modules.
 
-The method that is used to determine which congestion control mechanism is
-determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
-The default congestion control will be the last one registered (LIFO);
-so if you built everything as modules, the default will be reno. If you
-build with the defaults from Kconfig, then CUBIC will be builtin (not a
-module) and it will end up the default.
+The default congestion control mechanism is chosen based on the
+DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
+value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
+module will be autoloaded if needed and you will get the expected protocol. If
+you ask for an unknown congestion method, then the sysctl attempt will fail.
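+
+For example:
+
+	# sysctl -w net.ipv4.tcp_congestion_control=cubic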
 
-If you really want a particular default value then you will need
-to set it with the sysctl.  If you use a sysctl, the module will be autoloaded
-if needed and you will get the expected protocol. If you ask for an
-unknown congestion method, then the sysctl attempt will fail.
-
-If you remove a tcp congestion control module, then you will get the next
+If you remove a TCP congestion control module, then you will get the next
 available one. Since reno cannot be built as a module, and cannot be
-deleted, it will always be available.
+removed, it will always be available.
 
 How the new TCP output machine [nyi] works.
 ===========================================
index 01dc6a4fb16c9c3ace0f90c76bc5f3a0af40ea81..4d8e525b84eeb876cd61235e86ea76f01c1455e2 100644 (file)
@@ -1172,7 +1172,7 @@ N:        clps711x
 
 ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
 M:     Hartley Sweeten <hsweeten@visionengravers.com>
-M:     Ryan Mallon <rmallon@gmail.com>
+M:     Alexander Sverdlin <alexander.sverdlin@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-ep93xx/
@@ -1489,13 +1489,15 @@ M:      Gregory Clement <gregory.clement@free-electrons.com>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     arch/arm/mach-mvebu/
-F:     drivers/rtc/rtc-armada38x.c
 F:     arch/arm/boot/dts/armada*
 F:     arch/arm/boot/dts/kirkwood*
+F:     arch/arm/configs/mvebu_*_defconfig
+F:     arch/arm/mach-mvebu/
 F:     arch/arm64/boot/dts/marvell/armada*
 F:     drivers/cpufreq/mvebu-cpufreq.c
-F:     arch/arm/configs/mvebu_*_defconfig
+F:     drivers/irqchip/irq-armada-370-xp.c
+F:     drivers/irqchip/irq-mvebu-*
+F:     drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:     Jisheng Zhang <jszhang@marvell.com>
@@ -1721,7 +1723,6 @@ N:        rockchip
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
 M:     Krzysztof Kozlowski <krzk@kernel.org>
-R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 Q:     https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1829,7 +1830,6 @@ F:        drivers/edac/altera_edac.
 ARM/STI ARCHITECTURE
 M:     Patrice Chotard <patrice.chotard@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:     kernel@stlinux.com
 W:     http://www.stlinux.com
 S:     Maintained
 F:     arch/arm/mach-sti/
@@ -7143,7 +7143,7 @@ S:        Maintained
 F:     drivers/media/platform/rcar_jpu.c
 
 JSM Neo PCI based serial card
-M:     Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+M:     Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
 L:     linux-serial@vger.kernel.org
 S:     Maintained
 F:     drivers/tty/serial/jsm/
@@ -7707,7 +7707,7 @@ F:        drivers/platform/x86/hp_accel.c
 
 LIVE PATCHING
 M:     Josh Poimboeuf <jpoimboe@redhat.com>
-M:     Jessica Yu <jeyu@redhat.com>
+M:     Jessica Yu <jeyu@kernel.org>
 M:     Jiri Kosina <jikos@kernel.org>
 M:     Miroslav Benes <mbenes@suse.cz>
 R:     Petr Mladek <pmladek@suse.com>
@@ -8508,7 +8508,7 @@ S:        Odd Fixes
 F:     drivers/media/radio/radio-miropcm20*
 
 MELLANOX MLX4 core VPI driver
-M:     Yishai Hadas <yishaih@mellanox.com>
+M:     Tariq Toukan <tariqt@mellanox.com>
 L:     netdev@vger.kernel.org
 L:     linux-rdma@vger.kernel.org
 W:     http://www.mellanox.com
@@ -8516,7 +8516,6 @@ Q:        http://patchwork.ozlabs.org/project/netdev/list/
 S:     Supported
 F:     drivers/net/ethernet/mellanox/mlx4/
 F:     include/linux/mlx4/
-F:     include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX4 IB driver
 M:     Yishai Hadas <yishaih@mellanox.com>
@@ -8526,6 +8525,7 @@ Q:        http://patchwork.kernel.org/project/linux-rdma/list/
 S:     Supported
 F:     drivers/infiniband/hw/mlx4/
 F:     include/linux/mlx4/
+F:     include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX5 core VPI driver
 M:     Saeed Mahameed <saeedm@mellanox.com>
@@ -8538,7 +8538,6 @@ Q:        http://patchwork.ozlabs.org/project/netdev/list/
 S:     Supported
 F:     drivers/net/ethernet/mellanox/mlx5/core/
 F:     include/linux/mlx5/
-F:     include/uapi/rdma/mlx5-abi.h
 
 MELLANOX MLX5 IB driver
 M:     Matan Barak <matanb@mellanox.com>
@@ -8549,6 +8548,7 @@ Q:        http://patchwork.kernel.org/project/linux-rdma/list/
 S:     Supported
 F:     drivers/infiniband/hw/mlx5/
 F:     include/linux/mlx5/
+F:     include/uapi/rdma/mlx5-abi.h
 
 MELEXIS MLX90614 DRIVER
 M:     Crt Mori <cmo@melexis.com>
@@ -8588,7 +8588,7 @@ S:        Maintained
 F:     drivers/media/dvb-frontends/mn88473*
 
 MODULE SUPPORT
-M:     Jessica Yu <jeyu@redhat.com>
+M:     Jessica Yu <jeyu@kernel.org>
 M:     Rusty Russell <rusty@rustcorp.com.au>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
 S:     Maintained
@@ -10450,7 +10450,7 @@ S:      Orphan
 
 PXA RTC DRIVER
 M:     Robert Jarzmik <robert.jarzmik@free.fr>
-L:     rtc-linux@googlegroups.com
+L:     linux-rtc@vger.kernel.org
 S:     Maintained
 
 QAT DRIVER
@@ -10757,7 +10757,7 @@ X:      kernel/torture.c
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
 M:     Alexandre Belloni <alexandre.belloni@free-electrons.com>
-L:     rtc-linux@googlegroups.com
+L:     linux-rtc@vger.kernel.org
 Q:     http://patchwork.ozlabs.org/project/rtc-linux/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
 S:     Maintained
@@ -11268,7 +11268,6 @@ F:      drivers/media/rc/serial_ir.c
 
 STI CEC DRIVER
 M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
-L:     kernel@stlinux.com
 S:     Maintained
 F:     drivers/staging/media/st-cec/
 F:     Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11778,6 +11777,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
 S:     Supported
 F:     arch/arm/mach-davinci/
 F:     drivers/i2c/busses/i2c-davinci.c
+F:     arch/arm/boot/dts/da850*
 
 TI DAVINCI SERIES MEDIA DRIVER
 M:     "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
index 63e10bd4f14ac25297b3841eec5b2be6ee03035b..853ae9179af93a0ca9751a7faf40372e7fdb15dd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 9d5dc4fda3c16710c0443b6a5043f212230f057d..3f7d1b74c5e02bd46730c58b0a66756c89b904ab 100644 (file)
                @ there.
                .inst   'M' | ('Z' << 8) | (0x1310 << 16)   @ tstne r0, #0x4d000
 #else
-               mov     r0, r0
+               W(mov)  r0, r0
 #endif
                .endm
 
                .macro  __EFI_HEADER
 #ifdef CONFIG_EFI_STUB
-               b       __efi_start
-
                .set    start_offset, __efi_start - start
                .org    start + 0x3c
                @
index 7c711ba614173d91d8c2fd6ff4ccb13980bb3109..8a756870c238435af684215c653f54a739f4f1a5 100644 (file)
@@ -130,19 +130,22 @@ start:
                .rept   7
                __nop
                .endr
-   ARM(                mov     r0, r0          )
-   ARM(                b       1f              )
- THUMB(                badr    r12, 1f         )
- THUMB(                bx      r12             )
+#ifndef CONFIG_THUMB2_KERNEL
+               mov     r0, r0
+#else
+ AR_CLASS(     sub     pc, pc, #3      )       @ A/R: switch to Thumb2 mode
+  M_CLASS(     nop.w                   )       @ M: already in Thumb2 mode
+               .thumb
+#endif
+               W(b)    1f
 
                .word   _magic_sig      @ Magic numbers to help the loader
                .word   _magic_start    @ absolute load/run zImage address
                .word   _magic_end      @ zImage end address
                .word   0x04030201      @ endianness flag
 
- THUMB(                .thumb                  )
-1:             __EFI_HEADER
-
+               __EFI_HEADER
+1:
  ARM_BE8(      setend  be              )       @ go BE8 if compiled for BE8
  AR_CLASS(     mrs     r9, cpsr        )
 #ifdef CONFIG_ARM_VIRT_EXT
index 561f27d8d92224fe8f4f8c3224a5441f2d41175a..9444a9a9ba1057e6b594dc8e2595ac1e5ec593fb 100644 (file)
@@ -3,6 +3,11 @@
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
 
+/* firmware-provided startup stubs live here, where the secondary CPUs are
+ * spinning.
+ */
+/memreserve/ 0x00000000 0x00001000;
+
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
  * bcm2835.dtsi and bcm2836.dtsi.
index f18e1f1d0ce2c6aad83c08b7ad8ac4cfc41e715e..d2be8aa3370b7840e014964c147a4742d5ca87e8 100644 (file)
 
                ethphy0: ethernet-phy@2 {
                        reg = <2>;
+                       micrel,led-mode = <1>;
+                       clocks = <&clks IMX6UL_CLK_ENET_REF>;
+                       clock-names = "rmii-ref";
                };
 
                ethphy1: ethernet-phy@1 {
                        reg = <1>;
+                       micrel,led-mode = <1>;
+                       clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+                       clock-names = "rmii-ref";
                };
        };
 };
index b6f26824e83a96a88e34d33b99a0f4dace08e306..66f615a74118b9da1fe77088b63be8738c17f6c4 100644 (file)
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
        /* NetCP address range */
        ranges = <0 0x26000000 0x1000000>;
 
-       clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
-       clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
+       clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
+       clock-names = "pa_clk", "ethss_clk", "cpts";
        dma-coherent;
 
        ti,navigator-dmas = <&dma_gbe 0>,
index b58e7ebc091994645dd1adb35c7e7dc843fae7b0..148650406cf701cd7ffc5ac92d9054d606e97bfd 100644 (file)
                        };
                };
 
+               osr: sram@70000000 {
+                       compatible = "mmio-sram";
+                       reg = <0x70000000 0x10000>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       clocks = <&clkosr>;
+               };
+
                dspgpio0: keystone_dsp_gpio@02620240 {
                        compatible = "ti,keystone-dsp-gpio";
                        gpio-controller;
index 33a8eb28374eaa8d3b8aca95d8801227bedd87ca..06e2331f666d45fb2a2432ac1ef5401c3c50e37d 100644 (file)
@@ -1,4 +1,4 @@
-#include <versatile-ab.dts>
+#include "versatile-ab.dts"
 
 / {
        model = "ARM Versatile PB";
index cf062472e07bcb4be470bf35ab029df3438dbc7e..2b913f17d50f5d91f50d3aa30e3a8a26c97847b9 100644 (file)
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
        return ret;
 }
 
-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;
 
 void mcpm_cpu_power_down(void)
 {
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
         * on the CPU.
         */
        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-       phys_reset(__pa_symbol(mcpm_entry_point));
+       phys_reset(__pa_symbol(mcpm_entry_point), false);
 
        /* should never get here */
        BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
        __mcpm_cpu_down(cpu, cluster);
 
        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-       phys_reset(__pa_symbol(mcpm_entry_point));
+       phys_reset(__pa_symbol(mcpm_entry_point), false);
        BUG();
 }
 
index 302240c19a5aa688e7bdab1ece506dfbeaccea4e..a0d726a47c8a272b722b0d5623021058da50e113 100644 (file)
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
 #define pgprot_noncached(prot) (prot)
 #define pgprot_writecombine(prot) (prot)
 #define pgprot_dmacoherent(prot) (prot)
+#define pgprot_device(prot)    (prot)
 
 
 /*
index 841e924143f90e089bae9269acacdff65ab597f9..cbd959b73654c43deb72cbe084bdb99043a22b66 100644 (file)
@@ -1,6 +1,7 @@
 menuconfig ARCH_AT91
        bool "Atmel SoCs"
        depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+       select ARM_CPU_SUSPEND if PM
        select COMMON_CLK_AT91
        select GPIOLIB
        select PINCTRL
index efb80354f3034d856ab259bb1568dab41173d3b3..b5cc05dc2cb27c9e20e5f8290b65fff2efeec8ec 100644 (file)
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
        davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
        if (!davinci_sram_suspend) {
                pr_err("PM: cannot allocate SRAM memory\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto no_sram_mem;
        }
 
        davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
 
        suspend_set_ops(&davinci_pm_ops);
 
+       return 0;
+
+no_sram_mem:
+       iounmap(pm_config.ddrpsc_reg_base);
 no_ddrpsc_mem:
        iounmap(pm_config.ddrpll_reg_base);
 no_ddrpll_mem:
index 75bce2d0b1a83fa9f953b55a75ebdcb2ff4dc526..49f6a6242cf9fa3d9cd64d4ef22f8abf74e8e885 100644 (file)
                };
        };
 
+       reg_sys_5v: regulator@0 {
+               compatible = "regulator-fixed";
+               regulator-name = "SYS_5V";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_vdd_3v3: regulator@1 {
+               compatible = "regulator-fixed";
+               regulator-name = "VDD_3V3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-boot-on;
+               regulator-always-on;
+               vin-supply = <&reg_sys_5v>;
+       };
+
+       reg_5v_hub: regulator@2 {
+               compatible = "regulator-fixed";
+               regulator-name = "5V_HUB";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               regulator-boot-on;
+               gpio = <&gpio0 7 0>;
+               regulator-always-on;
+               vin-supply = <&reg_sys_5v>;
+       };
+
+       wl1835_pwrseq: wl1835-pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               /* WLAN_EN GPIO */
+               reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+               clocks = <&pmic>;
+               clock-names = "ext_clock";
+               power-off-delay-us = <10>;
+       };
+
        soc {
                spi0: spi@f7106000 {
                        status = "ok";
 
                /* GPIO blocks 16 thru 19 do not appear to be routed to pins */
 
+               dwmmc_0: dwmmc0@f723d000 {
+                       cap-mmc-highspeed;
+                       non-removable;
+                       bus-width = <0x8>;
+                       vmmc-supply = <&ldo19>;
+               };
+
+               dwmmc_1: dwmmc1@f723e000 {
+                       card-detect-delay = <200>;
+                       cap-sd-highspeed;
+                       sd-uhs-sdr12;
+                       sd-uhs-sdr25;
+                       sd-uhs-sdr50;
+                       vqmmc-supply = <&ldo7>;
+                       vmmc-supply = <&ldo10>;
+                       bus-width = <0x4>;
+                       disable-wp;
+                       cd-gpios = <&gpio1 0 1>;
+               };
+
                dwmmc_2: dwmmc2@f723f000 {
-                       ti,non-removable;
+                       bus-width = <0x4>;
                        non-removable;
-                       /* WL_EN */
-                       vmmc-supply = <&wlan_en_reg>;
+                       vmmc-supply = <&reg_vdd_3v3>;
+                       mmc-pwrseq = <&wl1835_pwrseq>;
 
                        #address-cells = <0x1>;
                        #size-cells = <0x0>;
                                interrupts = <3 IRQ_TYPE_EDGE_RISING>;
                        };
                };
-
-               wlan_en_reg: regulator@1 {
-                       compatible = "regulator-fixed";
-                       regulator-name = "wlan-en-regulator";
-                       regulator-min-microvolt = <1800000>;
-                       regulator-max-microvolt = <1800000>;
-                       /* WLAN_EN GPIO */
-                       gpio = <&gpio0 5 0>;
-                       /* WLAN card specific delay */
-                       startup-delay-us = <70000>;
-                       enable-active-high;
-               };
        };
 
        leds {
        pmic: pmic@f8000000 {
                compatible = "hisilicon,hi655x-pmic";
                reg = <0x0 0xf8000000 0x0 0x1000>;
+               #clock-cells = <0>;
                interrupt-controller;
                #interrupt-cells = <2>;
                pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
index 1e5129b19280bf8195a9ac24e506ec096493b084..5013e4b2ea71930960021fff96b8528611964955 100644 (file)
                        status = "disabled";
                };
 
-               fixed_5v_hub: regulator@0 {
-                       compatible = "regulator-fixed";
-                       regulator-name = "fixed_5v_hub";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       regulator-boot-on;
-                       gpio = <&gpio0 7 0>;
-                       regulator-always-on;
-               };
-
                usb_phy: usbphy {
                        compatible = "hisilicon,hi6220-usb-phy";
                        #phy-cells = <0>;
-                       phy-supply = <&fixed_5v_hub>;
+                       phy-supply = <&reg_5v_hub>;
                        hisilicon,peripheral-syscon = <&sys_ctrl>;
                };
 
 
                dwmmc_0: dwmmc0@f723d000 {
                        compatible = "hisilicon,hi6220-dw-mshc";
-                       num-slots = <0x1>;
-                       cap-mmc-highspeed;
-                       non-removable;
                        reg = <0x0 0xf723d000 0x0 0x1000>;
                        interrupts = <0x0 0x48 0x4>;
                        clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
                        clock-names = "ciu", "biu";
                        resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
                        reset-names = "reset";
-                       bus-width = <0x8>;
-                       vmmc-supply = <&ldo19>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&emmc_pmx_func &emmc_clk_cfg_func
                                     &emmc_cfg_func &emmc_rst_cfg_func>;
 
                dwmmc_1: dwmmc1@f723e000 {
                        compatible = "hisilicon,hi6220-dw-mshc";
-                       num-slots = <0x1>;
-                       card-detect-delay = <200>;
                        hisilicon,peripheral-syscon = <&ao_ctrl>;
-                       cap-sd-highspeed;
-                       sd-uhs-sdr12;
-                       sd-uhs-sdr25;
-                       sd-uhs-sdr50;
                        reg = <0x0 0xf723e000 0x0 0x1000>;
                        interrupts = <0x0 0x49 0x4>;
                        #address-cells = <0x1>;
                        clock-names = "ciu", "biu";
                        resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
                        reset-names = "reset";
-                       vqmmc-supply = <&ldo7>;
-                       vmmc-supply = <&ldo10>;
-                       bus-width = <0x4>;
-                       disable-wp;
-                       cd-gpios = <&gpio1 0 1>;
                        pinctrl-names = "default", "idle";
                        pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>;
                        pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>;
 
                dwmmc_2: dwmmc2@f723f000 {
                        compatible = "hisilicon,hi6220-dw-mshc";
-                       num-slots = <0x1>;
                        reg = <0x0 0xf723f000 0x0 0x1000>;
                        interrupts = <0x0 0x4a 0x4>;
                        clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
                        clock-names = "ciu", "biu";
                        resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
                        reset-names = "reset";
-                       bus-width = <0x4>;
-                       broken-cd;
                        pinctrl-names = "default", "idle";
                        pinctrl-0 = <&sdio_pmx_func &sdio_clk_cfg_func &sdio_cfg_func>;
                        pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>;
index ac8df5201cd656d70073bc03cd13436435b79c66..b4bc42ece7541154431a5855c4bbe0f984094445 100644 (file)
                        cpm_crypto: crypto@800000 {
                                compatible = "inside-secure,safexcel-eip197";
                                reg = <0x800000 0x200000>;
-                               interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-                               | IRQ_TYPE_LEVEL_HIGH)>,
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
index 7740a75a823084d027ffab1c02d221f3083dea87..6e2058847ddcd59ca9fd0d472bfb94f5331b00cd 100644 (file)
                        cps_crypto: crypto@800000 {
                                compatible = "inside-secure,safexcel-eip197";
                                reg = <0x800000 0x200000>;
-                               interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-                               | IRQ_TYPE_LEVEL_HIGH)>,
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
index 65cdd878cfbd603b323a08006872f5de90e90aee..97c123e09e45bfd80173029de0da0161dd4be0c7 100644 (file)
@@ -68,6 +68,7 @@ CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_ARM64_VA_BITS_48=y
@@ -208,6 +209,8 @@ CONFIG_BRCMFMAC=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PM8941_PWRKEY=y
@@ -263,6 +266,7 @@ CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
@@ -292,6 +296,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m
@@ -300,12 +305,14 @@ CONFIG_RENESAS_WDT=y
 CONFIG_BCM2835_WDT=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_EXYNOS_LPASS=m
 CONFIG_MFD_HI655X_PMIC=y
 CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FAN53555=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI655X=y
@@ -473,8 +480,10 @@ CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
 CONFIG_PWM=y
 CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
@@ -484,6 +493,7 @@ CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_ARM_SCPI_PROTOCOL=y
index 0e99978da3f05013d145132950ad204a92e3e4b0..59cca1d6ec547270adbd56a4e2265b9f9fc34375 100644 (file)
@@ -23,9 +23,9 @@
 #define ACPI_MADT_GICC_LENGTH  \
        (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
 
-#define BAD_MADT_GICC_ENTRY(entry, end)                                                \
-       (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||       \
-        (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end)                                        \
+       (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+       (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
 
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
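
A note on the reorder above: sizeof(*entry) tracks the current ACPICA struct layout (80 bytes, including the ACPI 6.0 fields), so the old bound check could push a valid 76-byte revision-5 entry sitting at the very end of the table past end and falsely reject it; the declared length is also now validated before it is used. A minimal standalone C sketch of the corrected order, with a simplified entry type (the real code uses struct acpi_madt_generic_interrupt and ACPI_MADT_GICC_LENGTH):

    #include <stdbool.h>

    struct gicc_header { unsigned char type, length; };

    /* Validate the self-reported length first, then bound the entry by
     * the expected length rather than by sizeof(*entry).
     */
    static bool bad_gicc_entry(const struct gicc_header *entry,
                               unsigned long end, unsigned char expected_len)
    {
            if (!entry)
                    return true;
            if (entry->length != expected_len)
                    return true;
            return (unsigned long)entry + expected_len > end;
    }
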
index 4f0e3ebfea4b4f6496a783bd172abc05e7ec1d4a..c7e3e6387a4910a6377d78e10caf98cc1c20243b 100644 (file)
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
                return NULL;
 
        root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-       if (!root_ops)
+       if (!root_ops) {
+               kfree(ri);
                return NULL;
+       }
 
        ri->cfg = pci_acpi_setup_ecam_mapping(root);
        if (!ri->cfg) {
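
The hunk above plugs a memory leak: ri is allocated earlier in pci_acpi_scan_root(), so a failed root_ops allocation must release it before returning. A toy userspace analogue of the pattern, with invented names:

    #include <stdlib.h>

    /* If the second allocation fails, the first must be freed on the
     * way out; the bug was returning without the free().
     */
    static int setup_pair(void **a, void **b)
    {
            *a = calloc(1, 64);
            if (!*a)
                    return -1;
            *b = calloc(1, 64);
            if (!*b) {
                    free(*a);
                    *a = NULL;
                    return -1;
            }
            return 0;
    }
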
index a89bddefdacf9194373a201c6c6c4cf8c4ac87c0..139093fab3260debefb4da2fbd33e37778784298 100644 (file)
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
 #define vxtime_lock()          do {} while (0)
 #define vxtime_unlock()                do {} while (0)
 
+/* This attribute is used in include/linux/jiffies.h along with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data      __attribute__((__section__(".data")))
+
 #endif
 
index 918d4c73e951d7815fc4063322c9ed8e493afb9f..5351e1f3950d158aaa5ff2590c32734862a79834 100644 (file)
@@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        unsigned long childksp;
-       p->set_child_tid = p->clear_child_tid = NULL;
 
        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
index f8da545854f979c33a7b3116d26d822caa46c494..106859ae27ffba114f9f4b0011151db0f65f98d4 100644 (file)
@@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 
        top_of_kernel_stack = sp;
 
-       p->set_child_tid = p->clear_child_tid = NULL;
-
        /* Locate userspace context on stack... */
        sp -= STACK_FRAME_OVERHEAD;     /* redzone */
        sp -= sizeof(struct pt_regs);
index f7c8f9972f618109209e4892512ed903f6b865f3..964da1891ea9cc6b5dc174131db1c7bbdc2f07f4 100644 (file)
@@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"
 
 menu "Kernel options"
 
-config PPC_DT_CPU_FTRS
-       bool "Device-tree based CPU feature discovery & setup"
-       depends on PPC_BOOK3S_64
-       default n
-       help
-         This enables code to use a new device tree binding for describing CPU
-         compatibility and features. Saying Y here will attempt to use the new
-         binding if the firmware provides it. Currently only the skiboot
-         firmware provides this binding.
-         If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-       bool "cpufeatures pass through unknown features to guest/userspace"
-       depends on PPC_DT_CPU_FTRS
-       default y
-
 config HIGHMEM
        bool "High memory support"
        depends on PPC32
index b4b5e6b671ca4dedc27fc35d59b30d4ad488e1c3..0c4e470571ca0faa74d3e9fa38fa57a384cab4bf 100644 (file)
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE       (sizeof(pte_t) << H_PTE_INDEX_SIZE)
index c2d509584a98070accd6be62519fa992e5bc1f3f..d02ad93bf70892f8d342b9d8890a4e1b8065eed6 100644 (file)
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR                   LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX                  LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG               LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE                        LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1             LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
            CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
            CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-           CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+           CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
index a2123f291ab0c5c8dc13cc9364c3a12848a4bb2c..bb99b651085aaf292e5f98ee23c7cdc53d443cd2 100644 (file)
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
-#define TASK_SIZE_USER64       TASK_SIZE_512TB
+#define TASK_SIZE_USER64               TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64      TASK_SIZE_128TB
 #else
-#define TASK_SIZE_USER64       TASK_SIZE_64TB
+#define TASK_SIZE_USER64               TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64      TASK_SIZE_64TB
 #endif
 
 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
                TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@ void release_thread(struct task_struct *);
  * with 128TB and conditionally enable up to 512TB
  */
 #ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW     ((is_32bit_task()) ? \
-                                TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW     ((is_32bit_task()) ?                    \
+                                TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW     TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP (is_32bit_task() ? \
index 8b3b46b7b0f2795b6195eb95ee649d3dece6dc9a..329771559cbbb16048d67d27450865703a248c90 100644 (file)
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+       int nid;
+
+       nid = numa_cpu_lookup_table[cpu];
+
+       /*
+        * Fall back to node 0 if nid is unset (it should be set, barring bugs).
+        * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+        */
+       return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
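
early_cpu_to_node() is needed because cpu_to_node() reads per-CPU data that is not initialized at the point where the percpu areas themselves are being placed; clamping an unset (negative) nid to node 0 keeps NODE_DATA() lookups safe that early. The intended caller pattern, excerpted from the setup_64.c hunk further down in this diff:

    /* Place a CPU's percpu area on its node during early boot, when
     * cpu_to_node() would still read uninitialized data.
     */
    return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)),
                                size, align, __pa(MAX_DMA_ADDRESS));
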
index 3e7ce86d5c1330986289a72b0c38ea2686f6d6e6..4d877144f3773b3aa4dcefd9deeb12b13bcfe136 100644 (file)
@@ -46,6 +46,8 @@
 #define PPC_FEATURE2_HTM_NOSC          0x01000000
 #define PPC_FEATURE2_ARCH_3_00         0x00800000 /* ISA 3.00 */
 #define PPC_FEATURE2_HAS_IEEE128       0x00400000 /* VSX IEEE Binary Float 128-bit */
+#define PPC_FEATURE2_DARN              0x00200000 /* darn random number insn */
+#define PPC_FEATURE2_SCV               0x00100000 /* scv syscall */
 
 /*
  * IMPORTANT!
index 9b3e88b1a9c83b041d9bd84ce9d17fdc87aaeaf7..6f849832a66914f520d59c38a3c6c28f405de904 100644 (file)
@@ -124,7 +124,8 @@ extern void __restore_cpu_e6500(void);
 #define COMMON_USER_POWER9     COMMON_USER_POWER8
 #define COMMON_USER2_POWER9    (COMMON_USER2_POWER8 | \
                                 PPC_FEATURE2_ARCH_3_00 | \
-                                PPC_FEATURE2_HAS_IEEE128)
+                                PPC_FEATURE2_HAS_IEEE128 | \
+                                PPC_FEATURE2_DARN )
 
 #ifdef CONFIG_PPC_BOOK3E_64
 #define COMMON_USER_BOOKE      (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
index fcc7588a96d694265899935eae34521eda29690d..4c7656dc4e04f09bed8b9bbc8d8f979876237202 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
        {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-utilization-of-resources-register", feat_enable_purr, 0},
-       {"subcore", feat_enable, CPU_FTR_SUBCORE},
        {"no-execute", feat_enable, 0},
        {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
        {"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
        {"wait-v3", feat_enable, 0},
 };
 
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+       if (!str)
+               return 0;
+
+       if (!strcmp(str, "off"))
+               using_dt_cpu_ftrs = false;
+       else if (!strcmp(str, "known"))
+               enable_unknown = false;
+       else
+               return 1;
+
+       return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
 
 static void __init cpufeatures_setup_start(u32 isa)
 {
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
                }
        }
 
-       if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+       if (!known && enable_unknown) {
                if (!feat_try_enable_unknown(f)) {
                        pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
                                f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
                cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
+static int __init disabled_on_cmdline(void)
+{
+       unsigned long root, chosen;
+       const char *p;
+
+       root = of_get_flat_dt_root();
+       chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+       if (chosen == -FDT_ERR_NOTFOUND)
+               return false;
+
+       p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+       if (!p)
+               return false;
+
+       if (strstr(p, "dt_cpu_ftrs=off"))
+               return true;
+
+       return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
                                        int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
        return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
        return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+       using_dt_cpu_ftrs = false;
+
        /* Setup and verify the FDT, if it fails we just bail */
        if (!early_init_dt_verify(fdt))
                return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
        if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
                return false;
 
+       if (disabled_on_cmdline())
+               return false;
+
        cpufeatures_setup_cpu();
 
        using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+       if (!using_dt_cpu_ftrs)
+               return;
+
        of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
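
The Kconfig knobs removed earlier in this diff become a runtime switch here: dt_cpu_ftrs=off disables the binding entirely, and dt_cpu_ftrs=known keeps it but skips unknown features. Because dt_cpu_ftrs_init() runs before early_param processing, the "off" case is additionally detected by scanning /chosen/bootargs in the flat device tree (disabled_on_cmdline() above). A minimal sketch of the early_param pattern the parser follows, with an invented parameter name:

    #include <linux/init.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* The handler receives the text after '=' (possibly NULL); return 0
     * to accept the value, non-zero to report it as malformed.
     */
    static bool demo_off __initdata;

    static int __init demo_parse(char *str)
    {
            if (str && !strcmp(str, "off")) {
                    demo_off = true;
                    return 0;
            }
            return 1;
    }
    early_param("demo_feature", demo_parse);
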
index baae104b16c7ba9f7cdf4a305ab5227ebf002467..2ad725ef4368a3e525681b0ce4a56ef8e960bf48 100644 (file)
@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
 #endif
+       current->thread.load_fp = 0;
        memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
        current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        current->thread.vr_save_area = NULL;
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
+       current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        current->thread.tm_tfhar = 0;
        current->thread.tm_texasr = 0;
        current->thread.tm_tfiar = 0;
+       current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
index 40c4887c27b613d73bc420b4aca34cee406325a8..f830562974417bfd0f338766b59a6ffd7a09a176 100644 (file)
@@ -161,7 +161,9 @@ static struct ibm_pa_feature {
        { .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
        { .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
        { .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
+#ifdef CONFIG_PPC_RADIX_MMU
        { .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX },
+#endif
        { .pabyte = 1,  .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
        { .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
                                    .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
index 71dcda91755d51a2e5705f29308239e7e9c7e506..857129acf960a1bf93c0c5791a6de1d84585caa3 100644 (file)
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-       init_mm.context.addr_limit = TASK_SIZE_128TB;
+       init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error "context.addr_limit not initialized."
 #endif
index f35ff9dea4fb4607459c10d42a29c47f2984e613..a8c1f99e96072530cb1f2d9ed702dffd78665720 100644 (file)
@@ -661,7 +661,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-       return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+       return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
                                    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +672,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-       if (cpu_to_node(from) == cpu_to_node(to))
+       if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
index c6dca2ae78ef9f1225dd6a13e0997034132315a4..a3edf813d4556c547e5b00155c5f9a0dc411872d 100644 (file)
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
         * mm->context.addr_limit. Default to max task size so that we copy the
         * default values to paca which will help us to handle slb miss early.
         */
-       mm->context.addr_limit = TASK_SIZE_128TB;
+       mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
        /*
         * The old code would re-promote on fork, we don't do that when using
index 018f8e90ac35fd19bf37bdbb99438f970621634a..bb28e1a412576ea15492be658a5bf78ee75950a5 100644 (file)
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
        .name                   = "POWER9",
        .n_counter              = MAX_PMU_COUNTERS,
        .add_fields             = ISA207_ADD_FIELDS,
-       .test_adder             = ISA207_TEST_ADDER,
+       .test_adder             = P9_DD1_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power9_config_bhrb,
        .bhrb_filter_map        = power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
        .name                   = "POWER9",
        .n_counter              = MAX_PMU_COUNTERS,
        .add_fields             = ISA207_ADD_FIELDS,
-       .test_adder             = P9_DD1_TEST_ADDER,
+       .test_adder             = ISA207_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power9_config_bhrb,
        .bhrb_filter_map        = power9_bhrb_filter_map,
index 33244e3d9375eae3ccd1224b7dbac87b3822f92a..4fd64d3f5c4429206b8c838ca99387e6668aee11 100644 (file)
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
 
          In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+       bool "Device-tree based CPU feature discovery & setup"
+       depends on PPC_BOOK3S_64
+       default y
+       help
+         This enables code to use a new device tree binding for describing CPU
+         compatibility and features. Saying Y here makes the kernel attempt to
+         use the binding if the firmware provides it. Currently only the skiboot
+         firmware provides this binding.
+         If you're not sure, say Y.
+
 config UDBG_RTAS_CONSOLE
        bool "RTAS based debug console"
        depends on PPC_RTAS
index 96c2b8a406303194737962905330c0a7a3900944..0c45cdbac4cfc80bcb79e287486d5a8a85fefdef 100644 (file)
@@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
            (REGION_ID(ea) != USER_REGION_ID)) {
 
                spin_unlock(&spu->register_lock);
-               ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+               ret = hash_page(ea,
+                               _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+                               0x300, dsisr);
                spin_lock(&spu->register_lock);
 
                if (!ret) {
index e5a891ae80ee5e6881bd10be148c04519d3dffd0..84b7ac926ce65682e83781ec6ab4ac97b6d0727f 100644 (file)
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
        skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
        if (!dump_skip(cprm, skip))
                goto Eio;
+
+       rc = 0;
 out:
        free_page((unsigned long)buf);
        return rc;
index 067defeea6911303e7fd59206e239fa7d6d770eb..78fa9395b8c55c3dab2c10e1a245f5a7a04be8a2 100644 (file)
@@ -714,7 +714,7 @@ static void pnv_npu2_release_context(struct kref *kref)
 void pnv_npu2_destroy_context(struct npu_context *npu_context,
                        struct pci_dev *gpdev)
 {
-       struct pnv_phb *nphb, *phb;
+       struct pnv_phb *nphb;
        struct npu *npu;
        struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
        struct device_node *nvlink_dn;
@@ -728,13 +728,12 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
 
        nphb = pci_bus_to_host(npdev->bus)->private_data;
        npu = &nphb->npu;
-       phb = pci_bus_to_host(gpdev->bus)->private_data;
        nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
        if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
                                                        &nvlink_index)))
                return;
        npu_context->npdev[npu->index][nvlink_index] = NULL;
-       opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id,
+       opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn));
        kref_put(&npu_context->kref, pnv_npu2_release_context);
 }
index 0babef11136fc8daba7f2666fed3f3b649cc2bd8..8c6119280c1306afd399d2c86ce88381882ab5df 100644 (file)
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-       if (!cpu_has_feature(CPU_FTR_SUBCORE))
+       unsigned pvr_ver;
+
+       pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+       if (pvr_ver != PVR_POWER8 &&
+           pvr_ver != PVR_POWER8E &&
+           pvr_ver != PVR_POWER8NVL)
                return 0;
 
        /*
index e104c71ea44ab5bf715fa5720c0a1ed47c6221c7..1fb162ba9d1c6aaa730123d07258a3923b3d245d 100644 (file)
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
                lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+               lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
                lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
        }
 
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
                lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+               lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
                lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
        }
 
index ef470b470b04ae85488d1a9ffcbe27519884a4db..6afddae2fb4796dc61de3258f683bcc158b15a2e 100644 (file)
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+       struct u8_gpio_chip *u8_gc =
+               container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
        u8_gc->data = in_8(mm_gc->regs);
 }
index 58243b0d21c006cfea9c47723221a33b7673fae9..b558c9e29de37f3ebeb18593c1a3d234c7d9d060 100644 (file)
@@ -192,9 +192,9 @@ config NR_CPUS
        int "Maximum number of CPUs"
        depends on SMP
        range 2 32 if SPARC32
-       range 2 1024 if SPARC64
+       range 2 4096 if SPARC64
        default 32 if SPARC32
-       default 64 if SPARC64
+       default 4096 if SPARC64
 
 source kernel/Kconfig.hz
 
@@ -295,9 +295,13 @@ config NUMA
        depends on SPARC64 && SMP
 
 config NODES_SHIFT
-       int
-       default "4"
+       int "Maximum NUMA Nodes (as a power of 2)"
+       range 4 5 if SPARC64
+       default "5"
        depends on NEED_MULTIPLE_NODES
+       help
+         Specify the maximum number of NUMA Nodes available on the target
+         system.  Increases memory reserved to accommodate various tables.
 
 # Some NUMA nodes have memory ranges that span
 # other nodes.  Even though a pfn is valid and
index f7de0dbc38af2dd36c9f34df53e6e951f6729825..83b36a5371ffc62e80fa694e077867f4f8b1f49f 100644 (file)
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK            TAG_CONTEXT_BITS
 #define CTX_HW_MASK            (CTX_NR_MASK | CTX_PGSZ_MASK)
 
-#define CTX_FIRST_VERSION      ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION      BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx)       \
         (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)      ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
index 22fede6eba116020cf7049e2f45a57545a6d55cb..2cddcda4f85f7555dced053b1fc82fd991e19943 100644 (file)
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
        unsigned long ctx_valid, flags;
-       int cpu;
+       int cpu = smp_processor_id();
 
+       per_cpu(per_cpu_secondary_mm, cpu) = mm;
        if (unlikely(mm == &init_mm))
                return;
 
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
         * for the first time, we must flush that context out of the
         * local TLB.
         */
-       cpu = smp_processor_id();
        if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                cpumask_set_cpu(cpu, mm_cpumask(mm));
                __flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }
 
 #define deactivate_mm(tsk,mm)  do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-       unsigned long flags;
-       int cpu;
-
-       spin_lock_irqsave(&mm->context.lock, flags);
-       if (!CTX_VALID(mm->context))
-               get_new_mmu_context(mm);
-       cpu = smp_processor_id();
-       if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-               cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-       load_secondary_context(mm);
-       __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-       tsb_context_switch(mm);
-       spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
index 2669370305465d7a4d1c2f9c5a7c9eae2f66474d..522b43db2ed336a5d34dee17b4e8c3b593e6ee5d 100644 (file)
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC      1
 #define PIL_SMP_RECEIVE_SIGNAL 2
 #define PIL_SMP_CAPTURE                3
-#define PIL_SMP_CTX_NEW_VERSION        4
 #define PIL_DEVICE_IRQ         5
 #define PIL_SMP_CALL_FUNC_SNGL 6
 #define PIL_DEFERRED_PCR_WORK  7
index 8174f6cdbbbbd87af5bdbcb923352ddadb4e237e..9dca7a892978a49d234a2b9325228b2c900d1276 100644 (file)
@@ -327,6 +327,7 @@ struct vio_dev {
        int                     compat_len;
 
        u64                     dev_no;
+       u64                     id;
 
        unsigned long           channel_id;
 
index b542cc7c8d94d8fc75319091f91ba5dc25251fd9..f87265afb1759e16b735e95067044f827dff27e7 100644 (file)
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
                pbuf.req.handle = cp->handle;
                pbuf.req.major = 1;
                pbuf.req.minor = 0;
-               strcpy(pbuf.req.svc_id, cp->service_id);
+               strcpy(pbuf.id_buf, cp->service_id);
 
                err = __ds_send(lp, &pbuf, msg_len);
                if (err > 0)
index 4d0248aa0928695597161d93f325a49311b43e2c..99dd133a029f06528f78b2b811d81c97fc7cf9f4 100644 (file)
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
        unsigned long page;
+       void *mondo, *p;
 
-       BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+       BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+       /* Make sure the mondo block is 64-byte aligned */
+       p = kzalloc(127, GFP_KERNEL);
+       if (!p) {
+               prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+               prom_halt();
+       }
+       mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+       tb->cpu_mondo_block_pa = __pa(mondo);
 
        page = get_zeroed_page(GFP_KERNEL);
        if (!page) {
-               prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+               prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
                prom_halt();
        }
 
-       tb->cpu_mondo_block_pa = __pa(page);
-       tb->cpu_list_pa = __pa(page + 64);
+       tb->cpu_list_pa = __pa(page);
 #endif
 }
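
The replacement mondo-block allocation uses a standard align-up trick: a 127-byte buffer (64 + 63) always contains a 64-byte-aligned 64-byte block, and rounding the start up is plain mask arithmetic. A standalone sketch of the computation used above:

    #include <stdint.h>

    /* Add 63, then clear the low six bits: advances by at most 63
     * bytes, landing on the next 64-byte boundary.
     */
    static void *align_up_64(void *p)
    {
            return (void *)(((uintptr_t)p + 63) & ~(uintptr_t)0x3f);
    }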
 
index c9804551262cc2832ea35b0d1285dc3051319fd4..6ae1e77be0bfde27696595a1f33e939935d83321 100644 (file)
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
 
index b3bc0ac757cc11c0c77e106a447817b89d821cae..fdf31040a7dc5cc460de6d60c9724a60933b38dc 100644 (file)
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        preempt_enable();
 }
 
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-       struct mm_struct *mm;
-       unsigned long flags;
-
-       clear_softint(1 << irq);
-
-       /* See if we need to allocate a new TLB context because
-        * the version of the one we are using is now out of date.
-        */
-       mm = current->active_mm;
-       if (unlikely(!mm || (mm == &init_mm)))
-               return;
-
-       spin_lock_irqsave(&mm->context.lock, flags);
-
-       if (unlikely(!CTX_VALID(mm->context)))
-               get_new_mmu_context(mm);
-
-       spin_unlock_irqrestore(&mm->context.lock, flags);
-
-       load_secondary_context(mm);
-       __flush_tlb_mm(CTX_HWBITS(mm->context),
-                      SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-       smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
index 10689cfd0ad40e6b12ae6b148f99ac2f5c7deb64..07c0df92496034efd1262dd2b40e56ffd5486c0c 100644 (file)
@@ -455,13 +455,16 @@ __tsb_context_switch:
        .type   copy_tsb,#function
 copy_tsb:              /* %o0=old_tsb_base, %o1=old_tsb_size
                         * %o2=new_tsb_base, %o3=new_tsb_size
+                        * %o4=page_size_shift
                         */
        sethi           %uhi(TSB_PASS_BITS), %g7
        srlx            %o3, 4, %o3
-       add             %o0, %o1, %g1   /* end of old tsb */
+       add             %o0, %o1, %o1   /* end of old tsb */
        sllx            %g7, 32, %g7
        sub             %o3, 1, %o3     /* %o3 == new tsb hash mask */
 
+       mov             %o4, %g1        /* page_size_shift */
+
 661:   prefetcha       [%o0] ASI_N, #one_read
        .section        .tsb_phys_patch, "ax"
        .word           661b
@@ -486,9 +489,9 @@ copy_tsb:           /* %o0=old_tsb_base, %o1=old_tsb_size
        /* This can definitely be computed faster... */
        srlx            %o0, 4, %o5     /* Build index */
        and             %o5, 511, %o5   /* Mask index */
-       sllx            %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+       sllx            %o5, %g1, %o5   /* Put into vaddr position */
        or              %o4, %o5, %o4   /* Full VADDR. */
-       srlx            %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+       srlx            %o4, %g1, %o4   /* Shift down to create index */
        and             %o4, %o3, %o4   /* Mask with new_tsb_nents-1 */
        sllx            %o4, 4, %o4     /* Shift back up into tsb ent offset */
        TSB_STORE(%o2 + %o4, %g2)       /* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb:           /* %o0=old_tsb_base, %o1=old_tsb_size
        TSB_STORE(%o2 + %o4, %g3)       /* Store TTE */
 
 80:    add             %o0, 16, %o0
-       cmp             %o0, %g1
+       cmp             %o0, %o1
        bne,pt          %xcc, 90b
         nop
 
index 7bd8f6556352d91cdc30e18d590e7421ffba6465..efe93ab4a9c0654143d52569c7157f9117e7ae37 100644 (file)
@@ -50,7 +50,7 @@ tl0_resv03e:  BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
 tl0_irq1:      TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2:      TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3:      TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4:      TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4:       BTRAP(0x44)
 #else
 tl0_irq1:      BTRAP(0x41)
 tl0_irq2:      BTRAP(0x42)
index f6bb857254fcfa170155d4cd8dc8cb717c5bfb97..075d38980dee394fdb32f86e130d0b3ec37ebfeb 100644 (file)
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
        if (!id) {
                dev_set_name(&vdev->dev, "%s", bus_id_name);
                vdev->dev_no = ~(u64)0;
+               vdev->id = ~(u64)0;
        } else if (!cfg_handle) {
                dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
                vdev->dev_no = *id;
+               vdev->id = ~(u64)0;
        } else {
                dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
                             *cfg_handle, *id);
                vdev->dev_no = *cfg_handle;
+               vdev->id = *id;
        }
 
        vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
        (void) vio_create_one(hp, node, &root_vdev->dev);
 }
 
+struct vio_md_node_query {
+       const char *type;
+       u64 dev_no;
+       u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+       struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
        struct vio_dev *vdev = to_vio_dev(dev);
 
-       if (vdev->mp == (u64) arg)
-               return 1;
+       if (vdev->dev_no != query->dev_no)
+               return 0;
+       if (vdev->id != query->id)
+               return 0;
+       if (strcmp(vdev->type, query->type))
+               return 0;
 
-       return 0;
+       return 1;
 }
 
 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+       const char *type;
+       const u64 *id, *cfg_handle;
+       u64 a;
+       struct vio_md_node_query query;
        struct device *dev;
 
-       dev = device_find_child(&root_vdev->dev, (void *) node,
+       type = mdesc_get_property(hp, node, "device-type", NULL);
+       if (!type) {
+               type = mdesc_get_property(hp, node, "name", NULL);
+               if (!type)
+                       type = mdesc_node_name(hp, node);
+       }
+
+       query.type = type;
+
+       id = mdesc_get_property(hp, node, "id", NULL);
+       cfg_handle = NULL;
+       mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+               u64 target;
+
+               target = mdesc_arc_target(hp, a);
+               cfg_handle = mdesc_get_property(hp, target,
+                                               "cfg-handle", NULL);
+               if (cfg_handle)
+                       break;
+       }
+
+       if (!id) {
+               query.dev_no = ~(u64)0;
+               query.id = ~(u64)0;
+       } else if (!cfg_handle) {
+               query.dev_no = *id;
+               query.id = ~(u64)0;
+       } else {
+               query.dev_no = *cfg_handle;
+               query.id = *id;
+       }
+
+       dev = device_find_child(&root_vdev->dev, &query,
                                vio_md_node_match);
        if (dev) {
                printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
 
                device_unregister(dev);
                put_device(dev);
+       } else {
+               if (!id)
+                       printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+                              type);
+               else if (!cfg_handle)
+                       printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+                              type, *id);
+               else
+                       printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+                              type, *cfg_handle, *id);
        }
 }
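
The match callback now keys on stable identity (type, dev_no, id) instead of the raw MD node value, which need not survive a machine-description update. A hedged usage sketch of the lookup idiom; the .type value is made up for illustration:

    /* device_find_child() hands each child of the parent, plus the
     * opaque argument, to the match callback, and returns the first
     * match with a reference held (hence the put_device()).
     */
    struct vio_md_node_query q = {
            .type   = "vdisk",              /* hypothetical type */
            .dev_no = 0,
            .id     = ~(u64)0,              /* no cfg-handle/id pair */
    };
    struct device *dev = device_find_child(&root_vdev->dev, &q,
                                           vio_md_node_match);
    if (dev)
            put_device(dev);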
 
index 69912d2f8b54e903ef040b346371cc27204b9d15..07c03e72d81248cebe9c3d48bbc93e1e2de455b7 100644 (file)
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644 (file)
index 0000000..d6b6c97
--- /dev/null
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+       .text
+       .align  4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+       mov     %o1, %g1
+       srl     %o3, 0, %g4
+       mulx    %g4, %g1, %o1
+       srlx    %g1, 0x20, %g3
+       mulx    %g3, %g4, %g5
+       sllx    %g5, 0x20, %o5
+       srl     %g1, 0, %g4
+       sub     %o1, %o5, %o5
+       srlx    %o5, 0x20, %o5
+       addcc   %g5, %o5, %g5
+       srlx    %o3, 0x20, %o5
+       mulx    %g4, %o5, %g4
+       mulx    %g3, %o5, %o5
+       sethi   %hi(0x80000000), %g3
+       addcc   %g5, %g4, %g5
+       srlx    %g5, 0x20, %g5
+       add     %g3, %g3, %g3
+       movcc   %xcc, %g0, %g3
+       addcc   %o5, %g5, %o5
+       sllx    %g4, 0x20, %g4
+       add     %o1, %g4, %o1
+       add     %o5, %g3, %g2
+       mulx    %g1, %o2, %g1
+       add     %g1, %g2, %g1
+       mulx    %o0, %o3, %o0
+       retl
+        add    %g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
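
For readers untangling the register dance above: __multi3 is libgcc's 128-bit (TImode) multiply. A hedged C model of the arithmetic it implements, built from 32-bit partial products; the helper names are made up:

    typedef unsigned long long u64;

    /* Full 64x64 -> 128-bit product via schoolbook decomposition. The
     * intermediate sums cannot overflow: each is at most
     * (2^32 - 1)^2 + (2^32 - 1) < 2^64.
     */
    static void mul_64x64_128(u64 u, u64 v, u64 *hi, u64 *lo)
    {
            u64 ul = u & 0xffffffffULL, uh = u >> 32;
            u64 vl = v & 0xffffffffULL, vh = v >> 32;
            u64 t = ul * vl;
            u64 w = (t >> 32) + uh * vl;
            u64 x = (w & 0xffffffffULL) + ul * vh;

            *lo = (x << 32) | (t & 0xffffffffULL);
            *hi = uh * vh + (w >> 32) + (x >> 32);
    }

    /* __multi3 returns only the low 128 bits of a 128x128 product, so
     * the cross terms contribute to the high word modulo 2^64.
     */
    static void multi3_model(u64 ah, u64 al, u64 bh, u64 bl,
                             u64 *rh, u64 *rl)
    {
            mul_64x64_128(al, bl, rh, rl);
            *rh += ah * bl + al * bh;
    }
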
index 0cda653ae007645fa01f05b4c40518332159a6ac..3c40ebd50f928cbbbfe69c65c35810a78b30c53d 100644 (file)
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
        }
 
        if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
-               pr_warn("hugepagesz=%llu not supported by MMU.\n",
+               hugetlb_bad_size();
+               pr_err("hugepagesz=%llu not supported by MMU.\n",
                        hugepage_size);
                goto out;
        }
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR     (1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+       unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+       unsigned long new_ver, new_ctx, old_ctx;
+       struct mm_struct *mm;
+       int cpu;
+
+       bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+       /* Reserve kernel context */
+       set_bit(0, mmu_context_bmap);
+
+       new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+       if (unlikely(new_ver == 0))
+               new_ver = CTX_FIRST_VERSION;
+       tlb_context_cache = new_ver;
+
+       /*
+        * Make sure that any new mm added into per_cpu_secondary_mm is
+        * going to go through the get_new_mmu_context() path.
+        */
+       mb();
+
+       /*
+        * Update the version to current on those CPUs that had a valid
+        * secondary context
+        */
+       for_each_online_cpu(cpu) {
+               /*
+                * If a new mm is stored after we took this mm from the array,
+                * it will go into the get_new_mmu_context() path, because we
+                * already bumped the version in tlb_context_cache.
+                */
+               mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+               if (unlikely(!mm || mm == &init_mm))
+                       continue;
+
+               old_ctx = mm->context.sparc64_ctx_val;
+               if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+                       new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+                       set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+                       mm->context.sparc64_ctx_val = new_ctx;
+               }
+       }
+}
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
        unsigned long ctx, new_ctx;
        unsigned long orig_pgsz_bits;
-       int new_version;
 
        spin_lock(&ctx_alloc_lock);
+retry:
+       /* A wrap might have happened; test again whether our context became valid */
+       if (unlikely(CTX_VALID(mm->context)))
+               goto out;
        orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
        ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-       new_version = 0;
        if (new_ctx >= (1 << CTX_NR_BITS)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
-                       int i;
-                       new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-                               CTX_FIRST_VERSION;
-                       if (new_ctx == 1)
-                               new_ctx = CTX_FIRST_VERSION;
-
-                       /* Don't call memset, for 16 entries that's just
-                        * plain silly...
-                        */
-                       mmu_context_bmap[0] = 3;
-                       mmu_context_bmap[1] = 0;
-                       mmu_context_bmap[2] = 0;
-                       mmu_context_bmap[3] = 0;
-                       for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-                               mmu_context_bmap[i + 0] = 0;
-                               mmu_context_bmap[i + 1] = 0;
-                               mmu_context_bmap[i + 2] = 0;
-                               mmu_context_bmap[i + 3] = 0;
-                       }
-                       new_version = 1;
-                       goto out;
+                       mmu_context_wrap();
+                       goto retry;
                }
        }
+       if (mm->context.sparc64_ctx_val)
+               cpumask_clear(mm_cpumask(mm));
        mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
        tlb_context_cache = new_ctx;
        mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
        spin_unlock(&ctx_alloc_lock);
-
-       if (unlikely(new_version))
-               smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
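
The rewrite above replaces the lazy, cross-call based "new context version" scheme with an eager re-tag done under ctx_alloc_lock, plus a retry in the allocator. A toy, runnable model of the wrap-and-retry core (sizes and names are illustrative; the kernel additionally re-tags each CPU's live secondary context and handles version-counter overflow):

    #include <stdbool.h>
    #include <string.h>

    /* 2-bit context numbers, version in the upper bits. Exhaustion
     * bumps the version, which invalidates every context tagged with
     * the old one, then allocation retries; slot 0 stays reserved.
     */
    #define NR_BITS   2
    #define NR_MASK   ((1u << NR_BITS) - 1)
    #define VER_FIRST (1u << NR_BITS)

    static unsigned ctx_cache = VER_FIRST;
    static bool bmap[1u << NR_BITS] = { true };     /* slot 0 = kernel */

    static unsigned get_new_ctx(void)
    {
            for (;;) {
                    for (unsigned i = 1; i <= NR_MASK; i++) {
                            if (!bmap[i]) {
                                    bmap[i] = true;
                                    ctx_cache = (ctx_cache & ~NR_MASK) | i;
                                    return ctx_cache;
                            }
                    }
                    memset(bmap, 0, sizeof(bmap));  /* wrap */
                    bmap[0] = true;
                    ctx_cache = (ctx_cache & ~NR_MASK) + VER_FIRST;
            }
    }
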
index bedf08b22a4773c5a104b56f7de8b44c461630c1..0d4b998c7d7b74a9e930f12d735591f14769befd 100644 (file)
@@ -496,7 +496,8 @@ retry_tsb_alloc:
                extern void copy_tsb(unsigned long old_tsb_base,
                                     unsigned long old_tsb_size,
                                     unsigned long new_tsb_base,
-                                    unsigned long new_tsb_size);
+                                    unsigned long new_tsb_size,
+                                    unsigned long page_size_shift);
                unsigned long old_tsb_base = (unsigned long) old_tsb;
                unsigned long new_tsb_base = (unsigned long) new_tsb;
 
@@ -504,7 +505,9 @@ retry_tsb_alloc:
                        old_tsb_base = __pa(old_tsb_base);
                        new_tsb_base = __pa(new_tsb_base);
                }
-               copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+               copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+                       tsb_index == MM_TSB_BASE ?
+                       PAGE_SHIFT : REAL_HPAGE_SHIFT);
        }
 
        mm->context.tsb_block[tsb_index].tsb = new_tsb;
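
The new copy_tsb() argument matters because the rehash reconstructs a virtual address from each entry's slot and tag, then recomputes the slot in the grown TSB; the slot spacing is PAGE_SHIFT for the base TSB but REAL_HPAGE_SHIFT for the huge-page TSB, and the old hardcoded PAGE_SHIFT scrambled huge-page entries on resize. A minimal sketch of the index math:

    /* Slot of vaddr in a TSB of new_nents entries, each covering
     * 1 << shift bytes; shift must match the TSB being copied.
     */
    static unsigned long tsb_slot(unsigned long vaddr, unsigned long shift,
                                  unsigned long new_nents)
    {
            return (vaddr >> shift) & (new_nents - 1);
    }
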
index 5d2fd6cd31896b87a3373a59cbfc3130808c6908..fcf4d27a38fb47af30d026079022d07bb803e323 100644 (file)
@@ -971,11 +971,6 @@ xcall_capture:
        wr              %g0, (1 << PIL_SMP_CAPTURE), %set_softint
        retry
 
-       .globl          xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-       wr              %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-       retry
-
 #ifdef CONFIG_KGDB
        .globl          xcall_kgdb_capture
 xcall_kgdb_capture:
index cd18994a9555ab7ec628da2bdc909dd445fef1a2..4ccfacc7232ab1ace21b8466ae73e4c7d18d3fba 100644 (file)
@@ -360,7 +360,7 @@ config SMP
          Management" code will be disabled if you say Y here.
 
          See also <file:Documentation/x86/i386/IO-APIC.txt>,
-         <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+         <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at
          <http://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
index 5851411e60fb9fd008f0035ac92932bd03eb519b..bf240b9204738598e78ece3f99e85d8f21385ea4 100644 (file)
@@ -159,7 +159,7 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
        # If '-Os' is enabled, disable it and print a warning.
         ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
           undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
-         $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE.  Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
+          $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE.  Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
         endif
 
     endif
index 44163e8c3868ec4dda58fed54f38411134224089..2c860ad4fe0686a9cf796cd9a5b4453b8199223f 100644 (file)
@@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 quiet_cmd_check_data_rel = DATAREL $@
 define cmd_check_data_rel
        for obj in $(filter %.o,$^); do \
-               readelf -S $$obj | grep -qF .rel.local && { \
+               ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
                        echo "error: $$obj has data relocations!" >&2; \
                        exit 1; \
                } || true; \
index 50bc26949e9edf75cc5978f8a08a4273bf489aec..48ef7bb32c4269331ceb2332b8e6bf5faffcb3de 100644 (file)
@@ -251,6 +251,23 @@ ENTRY(__switch_to_asm)
        jmp     __switch_to
 END(__switch_to_asm)
 
+/*
+ * The unwinder expects the last frame on the stack to always be at the same
+ * offset from the end of the page, which allows it to validate the stack.
+ * Calling schedule_tail() directly would break that convention because its an
+ * asmlinkage function so its argument has to be pushed on the stack.  This
+ * wrapper creates a proper "end of stack" frame header before the call.
+ */
+ENTRY(schedule_tail_wrapper)
+       FRAME_BEGIN
+
+       pushl   %eax
+       call    schedule_tail
+       popl    %eax
+
+       FRAME_END
+       ret
+ENDPROC(schedule_tail_wrapper)
 /*
  * A newly forked process directly context switches into this address.
  *
@@ -259,24 +276,15 @@ END(__switch_to_asm)
  * edi: kernel thread arg
  */
 ENTRY(ret_from_fork)
-       FRAME_BEGIN             /* help unwinder find end of stack */
-
-       /*
-        * schedule_tail() is asmlinkage so we have to put its 'prev' argument
-        * on the stack.
-        */
-       pushl   %eax
-       call    schedule_tail
-       popl    %eax
+       call    schedule_tail_wrapper
 
        testl   %ebx, %ebx
        jnz     1f              /* kernel threads are uncommon */
 
 2:
        /* When we fork, we trace the syscall return in the child, too. */
-       leal    FRAME_OFFSET(%esp), %eax
+       movl    %esp, %eax
        call    syscall_return_slowpath
-       FRAME_END
        jmp     restore_all
 
        /* kernel thread */
index 607d72c4a485cf25a3e582804cea0b7c031e1d32..4a4c0834f9659bd9b4855e23801d47103e644f09 100644 (file)
@@ -36,7 +36,6 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
-#include <asm/frame.h>
 #include <linux/err.h>
 
 .code64
@@ -406,19 +405,17 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
-       FRAME_BEGIN                     /* help unwinder find end of stack */
        movq    %rax, %rdi
-       call    schedule_tail           /* rdi: 'prev' task parameter */
+       call    schedule_tail                   /* rdi: 'prev' task parameter */
 
-       testq   %rbx, %rbx              /* from kernel_thread? */
-       jnz     1f                      /* kernel threads are uncommon */
+       testq   %rbx, %rbx                      /* from kernel_thread? */
+       jnz     1f                              /* kernel threads are uncommon */
 
 2:
-       leaq    FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
+       movq    %rsp, %rdi
        call    syscall_return_slowpath /* returns with IRQs disabled */
        TRACE_IRQS_ON                   /* user mode is traced as IRQS on */
        SWAPGS
-       FRAME_END
        jmp     restore_regs_and_iret
 
 1:
index 4fd5195deed01836a092dccf680baf6b3e150713..3f9a3d2a52095af1f100e72bc3c24b4fc8d1b397 100644 (file)
@@ -266,6 +266,7 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s
 #endif
 
 int mce_available(struct cpuinfo_x86 *c);
+bool mce_is_memory_error(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
index c5b8f760473c32f090c430693c5b2e8a61cc196f..32e14d13741670efa680c3c9e9facd1ffc0415a5 100644 (file)
@@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
                memcpy(insnbuf, replacement, a->replacementlen);
                insnbuf_sz = a->replacementlen;
 
-               /* 0xe8 is a relative jump; fix the offset. */
-               if (*insnbuf == 0xe8 && a->replacementlen == 5) {
+               /*
+                * 0xe8 is a relative call (CALL rel32); fix the offset.
+                *
+                * Instruction length is checked before the opcode to avoid
+                * accessing uninitialized bytes for zero-length replacements.
+                */
+               if (a->replacementlen == 5 && *insnbuf == 0xe8) {
                        *(s32 *)(insnbuf + 1) += replacement - instr;
                        DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
                                *(s32 *)(insnbuf + 1),
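
The reordering matters because && short-circuits left to right: with a zero-length replacement, insnbuf holds no initialized bytes, so the opcode byte may only be read after the length test has passed. A small standalone C illustration of the same rule (hypothetical names, not kernel code):

    #include <stdio.h>

    /* Hypothetical check mirroring the reordered condition above: the
     * length test runs first, so buf[0] is only read when the buffer is
     * known to hold an initialized 5-byte instruction. */
    static int is_relative_call(const unsigned char *buf, unsigned int len)
    {
            return len == 5 && buf[0] == 0xe8;
    }

    int main(void)
    {
            unsigned char insn[5] = { 0xe8, 0x00, 0x00, 0x00, 0x00 };

            printf("%d %d\n", is_relative_call(insn, 5),
                   is_relative_call(insn, 0));   /* prints: 1 0 */
            return 0;
    }
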
index 5abd4bf73d6e6805d033c361e4671087bb7be076..5cfbaeb6529a04bcba6cb6ba7b09610d0f2fa88c 100644 (file)
@@ -499,16 +499,14 @@ static int mce_usable_address(struct mce *m)
        return 1;
 }
 
-static bool memory_error(struct mce *m)
+bool mce_is_memory_error(struct mce *m)
 {
-       struct cpuinfo_x86 *c = &boot_cpu_data;
-
-       if (c->x86_vendor == X86_VENDOR_AMD) {
+       if (m->cpuvendor == X86_VENDOR_AMD) {
                /* ErrCodeExt[20:16] */
                u8 xec = (m->status >> 16) & 0x1f;
 
                return (xec == 0x0 || xec == 0x8);
-       } else if (c->x86_vendor == X86_VENDOR_INTEL) {
+       } else if (m->cpuvendor == X86_VENDOR_INTEL) {
                /*
                 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
                 *
@@ -529,6 +527,7 @@ static bool memory_error(struct mce *m)
 
        return false;
 }
+EXPORT_SYMBOL_GPL(mce_is_memory_error);
 
 static bool cec_add_mce(struct mce *m)
 {
@@ -536,7 +535,7 @@ static bool cec_add_mce(struct mce *m)
                return false;
 
        /* We eat only correctable DRAM errors with usable addresses. */
-       if (memory_error(m) &&
+       if (mce_is_memory_error(m) &&
            !(m->status & MCI_STATUS_UC) &&
            mce_usable_address(m))
                if (!cec_add_elem(m->addr >> PAGE_SHIFT))
@@ -713,7 +712,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 
                severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
 
-               if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
+               if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
                        if (m.status & MCI_STATUS_ADDRV)
                                m.severity = severity;
 
index 45db4d2ebd0118e666c205185b9162d13e33316e..e9f4d762aa5b5cabde501f95045fad6c43fef54b 100644 (file)
@@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
 
 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {
@@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
        if (!desc.mc)
                return -EINVAL;
 
-       ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
-                                desc.data, desc.size);
+       ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
        if (ret != UCODE_OK)
                return -EINVAL;
 
@@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
        enum ucode_state ret;
 
@@ -689,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
 
 #ifdef CONFIG_X86_32
        /* save BSP's matching patch for early load */
-       if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-               struct ucode_patch *p = find_patch(cpu);
+       if (save) {
+               struct ucode_patch *p = find_patch(0);
                if (p) {
                        memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
                        memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 {
        char fw_name[36] = "amd-ucode/microcode_amd.bin";
        struct cpuinfo_x86 *c = &cpu_data(cpu);
+       bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
        enum ucode_state ret = UCODE_NFOUND;
        const struct firmware *fw;
 
        /* reload ucode container only on the boot cpu */
-       if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
+       if (!refresh_fw || !bsp)
                return UCODE_OK;
 
        if (c->x86 >= 0x15)
@@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
                goto fw_release;
        }
 
-       ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
+       ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
 
  fw_release:
        release_firmware(fw);
index 0651e974dcb3a88211db1711a5969434d3163e5a..9bef1bbeba63885e3ba584960b97e8c190b4c5a8 100644 (file)
@@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size)
 {
        return module_alloc(size);
 }
-static inline void tramp_free(void *tramp)
+static inline void tramp_free(void *tramp, int size)
 {
+       int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+       set_memory_nx((unsigned long)tramp, npages);
+       set_memory_rw((unsigned long)tramp, npages);
        module_memfree(tramp);
 }
 #else
@@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size)
 {
        return NULL;
 }
-static inline void tramp_free(void *tramp) { }
+static inline void tramp_free(void *tramp, int size) { }
 #endif
 
 /* Defined as markers to the end of the ftrace default trampolines */
@@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
        /* Copy ftrace_caller onto the trampoline memory */
        ret = probe_kernel_read(trampoline, (void *)start_offset, size);
        if (WARN_ON(ret < 0)) {
-               tramp_free(trampoline);
+               tramp_free(trampoline, *tramp_size);
                return 0;
        }
 
@@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 
        /* Are we pointing to the reference? */
        if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
-               tramp_free(trampoline);
+               tramp_free(trampoline, *tramp_size);
                return 0;
        }
 
@@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
        unsigned long offset;
        unsigned long ip;
        unsigned int size;
-       int ret;
+       int ret, npages;
 
        if (ops->trampoline) {
                /*
@@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
                 */
                if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                        return;
+               npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
+               set_memory_rw(ops->trampoline, npages);
        } else {
                ops->trampoline = create_trampoline(ops, &size);
                if (!ops->trampoline)
                        return;
                ops->trampoline_size = size;
+               npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        }
 
        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
@@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
        /* Do a safe modify in case the trampoline is executing */
        new = ftrace_call_replace(ip, (unsigned long)func);
        ret = update_ftrace_func(ip, new);
+       set_memory_ro(ops->trampoline, npages);
 
        /* The update should never fail */
        WARN_ON(ret);
@@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return;
 
-       tramp_free((void *)ops->trampoline);
+       tramp_free((void *)ops->trampoline, ops->trampoline_size);
        ops->trampoline = 0;
 }
 
index 5b2bbfbb371284942b2ce95a3b2495eb9ecc8981..6b877807598b9028527a2357fef630431021c383 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/ftrace.h>
 #include <linux/frame.h>
 #include <linux/kasan.h>
+#include <linux/moduleloader.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn)
        }
 }
 
+/* Recover the page to RW mode before releasing it */
+void free_insn_page(void *page)
+{
+       set_memory_nx((unsigned long)page & PAGE_MASK, 1);
+       set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+       module_memfree(page);
+}
+
 static int arch_copy_kprobe(struct kprobe *p)
 {
        struct insn insn;
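
Both this hunk and the ftrace change above enforce the same rule: executable text pages are mapped read-only and executable while live, so before module_memfree() hands them back to the allocator they must be flipped back to non-executable and writable. A condensed sketch of the shared pattern (helper name hypothetical; header locations for set_memory_*() vary by kernel version):

    #include <linux/moduleloader.h>
    #include <linux/mm.h>

    /* Sketch: undo RO+X protections before freeing executable pages, so
     * the allocator gets ordinary writable, non-executable memory back. */
    static void free_exec_pages(void *addr, int size)
    {
            int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

            set_memory_nx((unsigned long)addr, npages);
            set_memory_rw((unsigned long)addr, npages);
            module_memfree(addr);
    }
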
index ff40e74c9181f0e009b51909a0e76ce25c1c2cf3..ffeae818aa7a95ffd0395ae9af5fcedd9b599981 100644 (file)
@@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
        printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
        printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
-               smp_processor_id());
+               raw_smp_processor_id());
 
        printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->ax, regs->bx, regs->cx, regs->dx);
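
The switch to raw_smp_processor_id() avoids the preemption debug check: in an oops path the preempt count is in an unknown state, and the checked variant could itself warn while the registers are being dumped. Roughly how the two differ under CONFIG_DEBUG_PREEMPT (a simplified sketch, not the literal kernel text):

    /* Simplified sketch of the distinction in <linux/smp.h>. */
    #ifdef CONFIG_DEBUG_PREEMPT
    # define smp_processor_id() debug_smp_processor_id() /* warns if preemptible */
    #else
    # define smp_processor_id() raw_smp_processor_id()   /* no check, no warning */
    #endif
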
index 0b4d3c686b1ef94463c8caabce01e38404d1b8c1..f818236950140fa0e3ccfc013e31857676db4f77 100644 (file)
@@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p)
         */
        x86_configure_nx();
 
-       simple_udelay_calibration();
-
        parse_early_param();
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p)
         */
        init_hypervisor_platform();
 
+       simple_udelay_calibration();
+
        x86_init.resources.probe_roms();
 
        /* after parse_early_param, so could debug it */
index 82c6d7f1fd73e64debd566d0846f3b9a62ab8959..b9389d72b2f784887e14acc89a6346a78c13c1b4 100644 (file)
@@ -104,6 +104,11 @@ static inline unsigned long *last_frame(struct unwind_state *state)
        return (unsigned long *)task_pt_regs(state->task) - 2;
 }
 
+static bool is_last_frame(struct unwind_state *state)
+{
+       return state->bp == last_frame(state);
+}
+
 #ifdef CONFIG_X86_32
 #define GCC_REALIGN_WORDS 3
 #else
@@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state)
        return last_frame(state) - GCC_REALIGN_WORDS;
 }
 
-static bool is_last_task_frame(struct unwind_state *state)
+static bool is_last_aligned_frame(struct unwind_state *state)
 {
        unsigned long *last_bp = last_frame(state);
        unsigned long *aligned_bp = last_aligned_frame(state);
 
        /*
-        * We have to check for the last task frame at two different locations
-        * because gcc can occasionally decide to realign the stack pointer and
-        * change the offset of the stack frame in the prologue of a function
-        * called by head/entry code.  Examples:
+        * GCC can occasionally decide to realign the stack pointer and change
+        * the offset of the stack frame in the prologue of a function called
+        * by head/entry code.  Examples:
         *
         * <start_secondary>:
         *      push   %edi
@@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state)
         *      push   %rbp
         *      mov    %rsp,%rbp
         *
-        * Note that after aligning the stack, it pushes a duplicate copy of
-        * the return address before pushing the frame pointer.
+        * After aligning the stack, it pushes a duplicate copy of the return
+        * address before pushing the frame pointer.
+        */
+       return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1));
+}
+
+static bool is_last_ftrace_frame(struct unwind_state *state)
+{
+       unsigned long *last_bp = last_frame(state);
+       unsigned long *last_ftrace_bp = last_bp - 3;
+
+       /*
+        * When unwinding from an ftrace handler of a function called by entry
+        * code, the stack layout of the last frame is:
+        *
+        *   bp
+        *   parent ret addr
+        *   bp
+        *   function ret addr
+        *   parent ret addr
+        *   pt_regs
+        *   -----------------
         */
-       return (state->bp == last_bp ||
-               (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
+       return (state->bp == last_ftrace_bp &&
+               *state->bp == *(state->bp + 2) &&
+               *(state->bp + 1) == *(state->bp + 4));
+}
+
+static bool is_last_task_frame(struct unwind_state *state)
+{
+       return is_last_frame(state) || is_last_aligned_frame(state) ||
+              is_last_ftrace_frame(state);
 }
 
 /*
index c329d28949056e2d6ad4688e411c5644b81e4d68..d24c8742d9b0aa6df35d5e479e627ff008ea221f 100644 (file)
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+       preempt_disable();
        kvm_x86_ops->cancel_hv_timer(apic->vcpu);
        apic->lapic_timer.hv_timer_in_use = false;
+       preempt_enable();
 }
 
 static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        for (i = 0; i < KVM_APIC_LVT_NUM; i++)
                kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_update_lvtt(apic);
-       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+       if (kvm_vcpu_is_reset_bsp(vcpu) &&
+           kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
                kvm_lapic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
        apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
index 183ddb235fb48658028433d451db75d554152c54..ba9891ac5c568f1798555bfa9dcbc421fff5ae2a 100644 (file)
@@ -1807,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
         * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross vendor migration purposes by "not present"
         */
-       var->unusable = !var->present || (var->type == 0);
+       var->unusable = !var->present;
 
        switch (seg) {
        case VCPU_SREG_TR:
@@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                 */
                if (var->unusable)
                        var->db = 0;
+               /* This is symmetric with svm_set_segment() */
                var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
@@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
-       if (var->unusable)
-               s->attrib = 0;
-       else {
-               s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
-               s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
-               s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
-               s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
-               s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
-               s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
-               s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
-               s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
-       }
+       s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+       s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+       s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+       s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+       s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+       s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+       s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+       s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 
        /*
         * This is always accurate, except if SYSRET returned to a segment
@@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
         * would entail passing the CPL to userspace and back.
         */
        if (seg == VCPU_SREG_SS)
-               svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+               /* This is symmetric with svm_get_segment() */
+               svm->vmcb->save.cpl = (var->dpl & 3);
 
        mark_dirty(svm->vmcb, VMCB_SEG);
 }
index 72f78396bc0960968161b66ccee00c42fa203fb7..9b4b5d6dcd34755acc0c09525ca93b3408ee4128 100644 (file)
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-                                 gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
        gva_t gva;
-       gpa_t vmptr;
        struct x86_exception e;
-       struct page *page;
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
        if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
                        vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
                return 1;
 
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-                               sizeof(vmptr), &e)) {
+       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+                               sizeof(*vmpointer), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
 
-       switch (exit_reason) {
-       case EXIT_REASON_VMON:
-               /*
-                * SDM 3: 24.11.5
-                * The first 4 bytes of VMXON region contain the supported
-                * VMCS revision identifier
-                *
-                * Note - IA32_VMX_BASIC[48] will never be 1
-                * for the nested case;
-                * which replaces physical address width with 32
-                *
-                */
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               page = nested_get_page(vcpu, vmptr);
-               if (page == NULL) {
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-                       kunmap(page);
-                       nested_release_page_clean(page);
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               kunmap(page);
-               nested_release_page_clean(page);
-               vmx->nested.vmxon_ptr = vmptr;
-               break;
-       case EXIT_REASON_VMCLEAR:
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMCLEAR_INVALID_ADDRESS);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               if (vmptr == vmx->nested.vmxon_ptr) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMCLEAR_VMXON_POINTER);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               break;
-       case EXIT_REASON_VMPTRLD:
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMPTRLD_INVALID_ADDRESS);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               if (vmptr == vmx->nested.vmxon_ptr) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMPTRLD_VMXON_POINTER);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               break;
-       default:
-               return 1; /* shouldn't happen */
-       }
-
-       if (vmpointer)
-               *vmpointer = vmptr;
        return 0;
 }
 
@@ -7066,6 +6990,8 @@ out_msr_bitmap:
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
        int ret;
+       gpa_t vmptr;
+       struct page *page;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
+
+       /*
+        * SDM 3: 24.11.5
+        * The first 4 bytes of the VMXON region contain the supported
+        * VMCS revision identifier.
+        *
+        * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
+        * which replaces the physical address width with 32.
+        */
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       page = nested_get_page(vcpu, vmptr);
+       if (page == NULL) {
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+       if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+               kunmap(page);
+               nested_release_page_clean(page);
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+       kunmap(page);
+       nested_release_page_clean(page);
+
+       vmx->nested.vmxon_ptr = vmptr;
        ret = enter_vmx_operation(vcpu);
        if (ret)
                return ret;
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
 
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       if (vmptr == vmx->nested.vmxon_ptr) {
+               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
        if (vmptr == vmx->nested.current_vmptr)
                nested_release_vmcs12(vmx);
 
@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
 
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       if (vmptr == vmx->nested.vmxon_ptr) {
+               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
        if (vmx->nested.current_vmptr != vmptr) {
                struct vmcs12 *new_vmcs12;
                struct page *page;
@@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 {
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        int cr = exit_qualification & 15;
-       int reg = (exit_qualification >> 8) & 15;
-       unsigned long val = kvm_register_readl(vcpu, reg);
+       int reg;
+       unsigned long val;
 
        switch ((exit_qualification >> 4) & 3) {
        case 0: /* mov to cr */
+               reg = (exit_qualification >> 8) & 15;
+               val = kvm_register_readl(vcpu, reg);
                switch (cr) {
                case 0:
                        if (vmcs12->cr0_guest_host_mask &
@@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
                 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
                 * cr0. Other attempted changes are ignored, with no exit.
                 */
+               val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
                if (vmcs12->cr0_guest_host_mask & 0xe &
                    (val ^ vmcs12->cr0_read_shadow))
                        return true;
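
The LMSW fix at the end matters because only the mov-to-CR encoding carries a GPR number; for LMSW the source data lives in the high bits of the exit qualification, so reading a register up front used a meaningless value. A sketch of the field layout assumed here, per the Intel SDM's control-register-access exit qualification (variable names illustrative):

    /* Sketch of decoding a CR-access exit qualification 'q'. */
    static void decode_cr_exit(unsigned long q)
    {
            int cr   = q & 15;        /* bits 3:0  - control register number */
            int type = (q >> 4) & 3;  /* bits 5:4  - 0=mov-to, 1=mov-from,
                                       *             2=clts, 3=lmsw */
            int reg  = (q >> 8) & 15; /* bits 11:8 - GPR number, mov forms only */
            unsigned long lmsw_val = (q >> 16) & 0x0f; /* LMSW source data;
                                       * only CR0[3:0] can be changed by lmsw */
            /* ... dispatch on 'type' before trusting 'reg' or 'lmsw_val' ... */
    }
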
index 02363e37d4a61e8271d7fed0a8c534e9dd90f264..a2cd0997343c485051e849551b9fc9d904177fe0 100644 (file)
@@ -8394,10 +8394,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
        if (vcpu->arch.pv.pv_unhalted)
                return true;
 
-       if (atomic_read(&vcpu->arch.nmi_queued))
+       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+           (vcpu->arch.nmi_pending &&
+            kvm_x86_ops->nmi_allowed(vcpu)))
                return true;
 
-       if (kvm_test_request(KVM_REQ_SMI, vcpu))
+       if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+           (vcpu->arch.smi_pending && !is_smm(vcpu)))
                return true;
 
        if (kvm_arch_interrupt_allowed(vcpu) &&
index 1dcd2be4cce44aabd5659b531962f918da1b8dd3..c8520b2c62d252f774f7c6b9df83eefa6a5925c2 100644 (file)
@@ -186,7 +186,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
        unsigned int i, level;
        unsigned long addr;
 
-       BUG_ON(irqs_disabled());
+       BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
        WARN_ON(PAGE_ALIGN(start) != start);
 
        on_each_cpu(__cpa_flush_range, NULL, 1);
index 7e76a4d8304bc5add30e5f86d16e4f5b423a24f6..43b96f5f78ba8c9c323c5ae6090c19f3ae290ad0 100644 (file)
@@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void)
 
        /*
         * We don't do virtual mode, since we don't do runtime services, on
-        * non-native EFI
+        * non-native EFI. With efi=old_map, we don't do runtime services in
+        * the kexec kernel either, because something else might already have
+        * been mapped at these virtual addresses during the initial boot.
         */
-       if (!efi_is_native()) {
+       if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
                efi_memmap_unmap();
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
index c488625c9712de4fe150d01df5c260e650967265..eb8dff15a7f63721d0f07845263da1af75670771 100644 (file)
@@ -71,11 +71,13 @@ static void __init early_code_mapping_set_exec(int executable)
 
 pgd_t * __init efi_call_phys_prolog(void)
 {
-       unsigned long vaddress;
-       pgd_t *save_pgd;
+       unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
+       pgd_t *save_pgd, *pgd_k, *pgd_efi;
+       p4d_t *p4d, *p4d_k, *p4d_efi;
+       pud_t *pud;
 
        int pgd;
-       int n_pgds;
+       int n_pgds, i, j;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                save_pgd = (pgd_t *)read_cr3();
@@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void)
        n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
        save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
 
+       /*
+        * Build a 1:1 identity mapping for efi=old_map usage. Note that
+        * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
+        * it is PUD_SIZE aligned with KASLR enabled. So for a given physical
+        * address X, pud_index(X) != pud_index(__va(X)); we can only copy the
+        * PUD entry of __va(X) into the PUD entry of X to build the 1:1
+        * mapping. This means we can only reuse the PMD tables of the direct
+        * mapping here.
+        */
        for (pgd = 0; pgd < n_pgds; pgd++) {
-               save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
-               vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
-               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+               addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
+               vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
+               pgd_efi = pgd_offset_k(addr_pgd);
+               save_pgd[pgd] = *pgd_efi;
+
+               p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
+               if (!p4d) {
+                       pr_err("Failed to allocate p4d table!\n");
+                       goto out;
+               }
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       addr_p4d = addr_pgd + i * P4D_SIZE;
+                       p4d_efi = p4d + p4d_index(addr_p4d);
+
+                       pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
+                       if (!pud) {
+                               pr_err("Failed to allocate pud table!\n");
+                               goto out;
+                       }
+
+                       for (j = 0; j < PTRS_PER_PUD; j++) {
+                               addr_pud = addr_p4d + j * PUD_SIZE;
+
+                               if (addr_pud > (max_pfn << PAGE_SHIFT))
+                                       break;
+
+                               vaddr = (unsigned long)__va(addr_pud);
+
+                               pgd_k = pgd_offset_k(vaddr);
+                               p4d_k = p4d_offset(pgd_k, vaddr);
+                               pud[j] = *pud_offset(p4d_k, vaddr);
+                       }
+               }
        }
 out:
        __flush_tlb_all();
@@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
        /*
         * After the lock is released, the original page table is restored.
         */
-       int pgd_idx;
+       int pgd_idx, i;
        int nr_pgds;
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                write_cr3((unsigned long)save_pgd);
@@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
 
        nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
 
-       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
+               pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
 
+               if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+                       continue;
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       p4d = p4d_offset(pgd,
+                                        pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+                       if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+                               continue;
+
+                       pud = (pud_t *)p4d_page_vaddr(*p4d);
+                       pud_free(&init_mm, pud);
+               }
+
+               p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+               p4d_free(&init_mm, p4d);
+       }
+
        kfree(save_pgd);
 
        __flush_tlb_all();
index 26615991d69cc8b024470c921aca227c2756e439..e0cf95a83f3fab918eb73715d188e631e02a1425 100644 (file)
@@ -360,6 +360,9 @@ void __init efi_free_boot_services(void)
                free_bootmem_late(start, size);
        }
 
+       if (!num_entries)
+               return;
+
        new_size = efi.memmap.desc_size * num_entries;
        new_phys = efi_memmap_alloc(num_entries);
        if (!new_phys) {
index 7c2947128f5813a677a0361eddcd277b5946d03e..0480892e97e501807a7f14f843eb549719f33c81 100644 (file)
@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
        if (blkg->blkcg != &blkcg_root)
-               blk_exit_rl(&blkg->rl);
+               blk_exit_rl(blkg->q, &blkg->rl);
 
        blkg_rwstat_exit(&blkg->stat_ios);
        blkg_rwstat_exit(&blkg->stat_bytes);
index c7068520794bd0ba060b905f850efaae6a8cbd36..a7421b772d0e0e3f4b8372fbc11aefd83763d30a 100644 (file)
@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
        if (!rl->rq_pool)
                return -ENOMEM;
 
+       if (rl != &q->root_rl)
+               WARN_ON_ONCE(!blk_get_queue(q));
+
        return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-       if (rl->rq_pool)
+       if (rl->rq_pool) {
                mempool_destroy(rl->rq_pool);
+               if (rl != &q->root_rl)
+                       blk_put_queue(q);
+       }
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
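
The new pairing fixes a use-after-free: a request_list embedded in a blkcg group can outlive the queue's normal teardown path, so every non-root list now pins the queue for its whole lifetime. The invariant, condensed from the two hunks above (a restatement, not new code):

    /* blk_init_rl(): every non-root request_list takes a queue reference. */
    if (rl != &q->root_rl)
            WARN_ON_ONCE(!blk_get_queue(q));

    /* blk_exit_rl(): the reference is dropped only after the mempool is
     * destroyed, so the queue cannot vanish under a live request_list. */
    mempool_destroy(rl->rq_pool);
    if (rl != &q->root_rl)
            blk_put_queue(q);
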
index a69ad122ed66c6b93385f1b3959a893b407b9d05..1bcccedcc74f0b48f58363640acb1eae04704800 100644 (file)
@@ -628,25 +628,6 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
-void blk_mq_abort_requeue_list(struct request_queue *q)
-{
-       unsigned long flags;
-       LIST_HEAD(rq_list);
-
-       spin_lock_irqsave(&q->requeue_lock, flags);
-       list_splice_init(&q->requeue_list, &rq_list);
-       spin_unlock_irqrestore(&q->requeue_lock, flags);
-
-       while (!list_empty(&rq_list)) {
-               struct request *rq;
-
-               rq = list_first_entry(&rq_list, struct request, queuelist);
-               list_del_init(&rq->queuelist);
-               blk_mq_end_request(rq, -EIO);
-       }
-}
-EXPORT_SYMBOL(blk_mq_abort_requeue_list);
-
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
        if (tag < tags->nr_tags) {
@@ -2660,7 +2641,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+                                                       int nr_hw_queues)
 {
        struct request_queue *q;
 
@@ -2684,6 +2666,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+       mutex_lock(&set->tag_list_lock);
+       __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+       mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
index 504fee9400523e206f987542d59abf8167e31c7e..283da7fbe03408d9eef71ba3e1a4f863671d761b 100644 (file)
@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_free_queue_stats(q->stats);
 
-       blk_exit_rl(&q->root_rl);
+       blk_exit_rl(q, &q->root_rl);
 
        if (q->queue_tags)
                __blk_queue_free_tags(q);
@@ -887,10 +887,10 @@ int blk_register_queue(struct gendisk *disk)
                goto unlock;
        }
 
-       if (q->mq_ops)
+       if (q->mq_ops) {
                __blk_mq_register_dev(dev, q);
-
-       blk_mq_debugfs_register(q);
+               blk_mq_debugfs_register(q);
+       }
 
        kobject_uevent(&q->kobj, KOBJ_ADD);
 
index b78db2e5fdff1e158ea52c179313ff3eba282015..fc13dd0c6e3956a84913d9e71132c0f321a67280 100644 (file)
@@ -22,11 +22,11 @@ static int throtl_quantum = 32;
 #define DFL_THROTL_SLICE_HD (HZ / 10)
 #define DFL_THROTL_SLICE_SSD (HZ / 50)
 #define MAX_THROTL_SLICE (HZ)
-#define DFL_IDLE_THRESHOLD_SSD (1000L) /* 1 ms */
-#define DFL_IDLE_THRESHOLD_HD (100L * 1000) /* 100 ms */
 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
-/* default latency target is 0, eg, guarantee IO latency by default */
-#define DFL_LATENCY_TARGET (0)
+#define MIN_THROTL_BPS (320 * 1024)
+#define MIN_THROTL_IOPS (10)
+#define DFL_LATENCY_TARGET (-1L)
+#define DFL_IDLE_THRESHOLD (0)
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
 
@@ -157,6 +157,7 @@ struct throtl_grp {
        unsigned long last_check_time;
 
        unsigned long latency_target; /* us */
+       unsigned long latency_target_conf; /* us */
        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];
@@ -165,6 +166,7 @@ struct throtl_grp {
        unsigned long checked_last_finish_time; /* ns / 1024 */
        unsigned long avg_idletime; /* ns / 1024 */
        unsigned long idletime_threshold; /* us */
+       unsigned long idletime_threshold_conf; /* us */
 
        unsigned int bio_cnt; /* total bios */
        unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
@@ -201,8 +203,6 @@ struct throtl_data
        unsigned int limit_index;
        bool limit_valid[LIMIT_CNT];
 
-       unsigned long dft_idletime_threshold; /* us */
-
        unsigned long low_upgrade_time;
        unsigned long low_downgrade_time;
 
@@ -294,8 +294,14 @@ static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
 
        td = tg->td;
        ret = tg->bps[rw][td->limit_index];
-       if (ret == 0 && td->limit_index == LIMIT_LOW)
-               return tg->bps[rw][LIMIT_MAX];
+       if (ret == 0 && td->limit_index == LIMIT_LOW) {
+               /* intermediate node or iops isn't 0 */
+               if (!list_empty(&blkg->blkcg->css.children) ||
+                   tg->iops[rw][td->limit_index])
+                       return U64_MAX;
+               else
+                       return MIN_THROTL_BPS;
+       }
 
        if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
            tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
@@ -315,10 +321,17 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 
        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return UINT_MAX;
+
        td = tg->td;
        ret = tg->iops[rw][td->limit_index];
-       if (ret == 0 && tg->td->limit_index == LIMIT_LOW)
-               return tg->iops[rw][LIMIT_MAX];
+       if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
+               /* intermediate node or bps isn't 0 */
+               if (!list_empty(&blkg->blkcg->css.children) ||
+                   tg->bps[rw][td->limit_index])
+                       return UINT_MAX;
+               else
+                       return MIN_THROTL_IOPS;
+       }
 
        if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
            tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
@@ -482,6 +495,9 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
        /* LIMIT_LOW will have default value 0 */
 
        tg->latency_target = DFL_LATENCY_TARGET;
+       tg->latency_target_conf = DFL_LATENCY_TARGET;
+       tg->idletime_threshold = DFL_IDLE_THRESHOLD;
+       tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
 
        return &tg->pd;
 }
@@ -510,8 +526,6 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
        tg->td = td;
-
-       tg->idletime_threshold = td->dft_idletime_threshold;
 }
 
 /*
@@ -1349,7 +1363,7 @@ static int tg_print_conf_uint(struct seq_file *sf, void *v)
        return 0;
 }
 
-static void tg_conf_updated(struct throtl_grp *tg)
+static void tg_conf_updated(struct throtl_grp *tg, bool global)
 {
        struct throtl_service_queue *sq = &tg->service_queue;
        struct cgroup_subsys_state *pos_css;
@@ -1367,8 +1381,26 @@ static void tg_conf_updated(struct throtl_grp *tg)
         * restrictions in the whole hierarchy and allows them to bypass
         * blk-throttle.
         */
-       blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
-               tg_update_has_rules(blkg_to_tg(blkg));
+       blkg_for_each_descendant_pre(blkg, pos_css,
+                       global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
+               struct throtl_grp *this_tg = blkg_to_tg(blkg);
+               struct throtl_grp *parent_tg;
+
+               tg_update_has_rules(this_tg);
+               /* ignore root/second level */
+               if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
+                   !blkg->parent->parent)
+                       continue;
+               parent_tg = blkg_to_tg(blkg->parent);
+               /*
+                * make sure no child has a higher idle time threshold or
+                * a lower latency target than its parent
+                */
+               this_tg->idletime_threshold = min(this_tg->idletime_threshold,
+                               parent_tg->idletime_threshold);
+               this_tg->latency_target = max(this_tg->latency_target,
+                               parent_tg->latency_target);
+       }
 
        /*
         * We're already holding queue_lock and know @tg is valid.  Let's
@@ -1413,7 +1445,7 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
        else
                *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
 
-       tg_conf_updated(tg);
+       tg_conf_updated(tg, false);
        ret = 0;
 out_finish:
        blkg_conf_finish(&ctx);
@@ -1497,34 +1529,34 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
            tg->iops_conf[READ][off] == iops_dft &&
            tg->iops_conf[WRITE][off] == iops_dft &&
            (off != LIMIT_LOW ||
-            (tg->idletime_threshold == tg->td->dft_idletime_threshold &&
-             tg->latency_target == DFL_LATENCY_TARGET)))
+            (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
+             tg->latency_target_conf == DFL_LATENCY_TARGET)))
                return 0;
 
-       if (tg->bps_conf[READ][off] != bps_dft)
+       if (tg->bps_conf[READ][off] != U64_MAX)
                snprintf(bufs[0], sizeof(bufs[0]), "%llu",
                        tg->bps_conf[READ][off]);
-       if (tg->bps_conf[WRITE][off] != bps_dft)
+       if (tg->bps_conf[WRITE][off] != U64_MAX)
                snprintf(bufs[1], sizeof(bufs[1]), "%llu",
                        tg->bps_conf[WRITE][off]);
-       if (tg->iops_conf[READ][off] != iops_dft)
+       if (tg->iops_conf[READ][off] != UINT_MAX)
                snprintf(bufs[2], sizeof(bufs[2]), "%u",
                        tg->iops_conf[READ][off]);
-       if (tg->iops_conf[WRITE][off] != iops_dft)
+       if (tg->iops_conf[WRITE][off] != UINT_MAX)
                snprintf(bufs[3], sizeof(bufs[3]), "%u",
                        tg->iops_conf[WRITE][off]);
        if (off == LIMIT_LOW) {
-               if (tg->idletime_threshold == ULONG_MAX)
+               if (tg->idletime_threshold_conf == ULONG_MAX)
                        strcpy(idle_time, " idle=max");
                else
                        snprintf(idle_time, sizeof(idle_time), " idle=%lu",
-                               tg->idletime_threshold);
+                               tg->idletime_threshold_conf);
 
-               if (tg->latency_target == ULONG_MAX)
+               if (tg->latency_target_conf == ULONG_MAX)
                        strcpy(latency_time, " latency=max");
                else
                        snprintf(latency_time, sizeof(latency_time),
-                               " latency=%lu", tg->latency_target);
+                               " latency=%lu", tg->latency_target_conf);
        }
 
        seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
@@ -1563,8 +1595,8 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
        v[2] = tg->iops_conf[READ][index];
        v[3] = tg->iops_conf[WRITE][index];
 
-       idle_time = tg->idletime_threshold;
-       latency_time = tg->latency_target;
+       idle_time = tg->idletime_threshold_conf;
+       latency_time = tg->latency_target_conf;
        while (true) {
                char tok[27];   /* wiops=18446744073709551616 */
                char *p;
@@ -1623,17 +1655,33 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
                tg->iops_conf[READ][LIMIT_MAX]);
        tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
                tg->iops_conf[WRITE][LIMIT_MAX]);
+       tg->idletime_threshold_conf = idle_time;
+       tg->latency_target_conf = latency_time;
+
+       /* force the user to configure all settings for the low limit */
+       if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
+             tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
+           tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
+           tg->latency_target_conf == DFL_LATENCY_TARGET) {
+               tg->bps[READ][LIMIT_LOW] = 0;
+               tg->bps[WRITE][LIMIT_LOW] = 0;
+               tg->iops[READ][LIMIT_LOW] = 0;
+               tg->iops[WRITE][LIMIT_LOW] = 0;
+               tg->idletime_threshold = DFL_IDLE_THRESHOLD;
+               tg->latency_target = DFL_LATENCY_TARGET;
+       } else if (index == LIMIT_LOW) {
+               tg->idletime_threshold = tg->idletime_threshold_conf;
+               tg->latency_target = tg->latency_target_conf;
+       }
 
-       if (index == LIMIT_LOW) {
-               blk_throtl_update_limit_valid(tg->td);
-               if (tg->td->limit_valid[LIMIT_LOW])
+       blk_throtl_update_limit_valid(tg->td);
+       if (tg->td->limit_valid[LIMIT_LOW]) {
+               if (index == LIMIT_LOW)
                        tg->td->limit_index = LIMIT_LOW;
-               tg->idletime_threshold = (idle_time == ULONG_MAX) ?
-                       ULONG_MAX : idle_time;
-               tg->latency_target = (latency_time == ULONG_MAX) ?
-                       ULONG_MAX : latency_time;
-       }
-       tg_conf_updated(tg);
+       } else
+               tg->td->limit_index = LIMIT_MAX;
+       tg_conf_updated(tg, index == LIMIT_LOW &&
+               tg->td->limit_valid[LIMIT_LOW]);
        ret = 0;
 out_finish:
        blkg_conf_finish(&ctx);
@@ -1722,17 +1770,25 @@ static bool throtl_tg_is_idle(struct throtl_grp *tg)
        /*
         * cgroup is idle if:
         * - single idle is too long, longer than a fixed value (in case user
-        *   configure a too big threshold) or 4 times of slice
+        *   configures too big a threshold) or 4 times the idletime threshold
         * - average think time is more than threshold
         * - IO latency is largely below threshold
         */
-       unsigned long time = jiffies_to_usecs(4 * tg->td->throtl_slice);
-
-       time = min_t(unsigned long, MAX_IDLE_TIME, time);
-       return (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
-              tg->avg_idletime > tg->idletime_threshold ||
-              (tg->latency_target && tg->bio_cnt &&
+       unsigned long time;
+       bool ret;
+
+       time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
+       ret = tg->latency_target == DFL_LATENCY_TARGET ||
+             tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
+             (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
+             tg->avg_idletime > tg->idletime_threshold ||
+             (tg->latency_target && tg->bio_cnt &&
                tg->bad_bio_cnt * 5 < tg->bio_cnt);
+       throtl_log(&tg->service_queue,
+               "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
+               tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
+               tg->bio_cnt, ret, tg->td->scale);
+       return ret;
 }
 
 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
@@ -1828,6 +1884,7 @@ static void throtl_upgrade_state(struct throtl_data *td)
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;
 
+       throtl_log(&td->service_queue, "upgrade to max");
        td->limit_index = LIMIT_MAX;
        td->low_upgrade_time = jiffies;
        td->scale = 0;
@@ -1850,6 +1907,7 @@ static void throtl_downgrade_state(struct throtl_data *td, int new)
 {
        td->scale /= 2;
 
+       throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
        if (td->scale) {
                td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
                return;
@@ -2023,6 +2081,11 @@ static void throtl_update_latency_buckets(struct throtl_data *td)
                td->avg_buckets[i].valid = true;
                last_latency = td->avg_buckets[i].latency;
        }
+
+       for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+               throtl_log(&td->service_queue,
+                       "Latency bucket %d: latency=%ld, valid=%d", i,
+                       td->avg_buckets[i].latency, td->avg_buckets[i].valid);
 }
 #else
 static inline void throtl_update_latency_buckets(struct throtl_data *td)
@@ -2354,19 +2417,14 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
        struct throtl_data *td;
-       struct cgroup_subsys_state *pos_css;
-       struct blkcg_gq *blkg;
 
        td = q->td;
        BUG_ON(!td);
 
-       if (blk_queue_nonrot(q)) {
+       if (blk_queue_nonrot(q))
                td->throtl_slice = DFL_THROTL_SLICE_SSD;
-               td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_SSD;
-       } else {
+       else
                td->throtl_slice = DFL_THROTL_SLICE_HD;
-               td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_HD;
-       }
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
        /* if no low limit, use previous default */
        td->throtl_slice = DFL_THROTL_SLICE_HD;
@@ -2375,18 +2433,6 @@ void blk_throtl_register_queue(struct request_queue *q)
        td->track_bio_latency = !q->mq_ops && !q->request_fn;
        if (!td->track_bio_latency)
                blk_stat_enable_accounting(q);
-
-       /*
-        * some tg are created before queue is fully initialized, eg, nonrot
-        * isn't initialized yet
-        */
-       rcu_read_lock();
-       blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
-               struct throtl_grp *tg = blkg_to_tg(blkg);
-
-               tg->idletime_threshold = td->dft_idletime_threshold;
-       }
-       rcu_read_unlock();
 }
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
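
The split into *_conf fields separates what the user wrote from what the code enforces: the configured value is what cgroup files display and parse, while the effective value is re-derived on every update, clamped so a child can never be more lenient than its parent. A worked example with hypothetical numbers, using the same clamping as tg_conf_updated() above — parent at idle=1000/latency=50, child configured at idle=5000/latency=10:

    /* Hypothetical numbers; effective values after the clamp. */
    child->idletime_threshold = min(5000UL, 1000UL);  /* -> 1000, parent wins */
    child->latency_target     = max(10UL, 50UL);      /* -> 50,   parent wins */
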
index 2ed70228e44fc706e6efee71ca000e5e47433217..83c8e1100525f7dd80b9a75e83cd2f8efb0f5969 100644 (file)
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);
index da69b079725fbf62a407db76f7c5c430c52be3f9..b7e9c7feeab2acbd1a846d0c31285460aba076ec 100644 (file)
@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY         (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
        cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+       if (!iops_mode(cfqd))
+               return CFQ_SLICE_MODE_GROUP_DELAY;
+       else
+               return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
        n = rb_last(&st->rb);
        if (n) {
                __cfqg = rb_entry_cfqg(n);
-               cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+               cfqg->vdisktime = __cfqg->vdisktime +
+                       cfq_get_cfqg_vdisktime_delay(cfqd);
        } else
                cfqg->vdisktime = st->min_vdisktime;
        cfq_group_service_tree_add(st, cfqg);
index ff07b9143ca456f8b2e8aaa3f41f9d6d8b5fe7a3..c5ec8246e25e1ed3eb6aef683c99e4aacd32ce2c 100644 (file)
@@ -320,8 +320,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 
        if (info) {
                struct partition_meta_info *pinfo = alloc_part_info(disk);
-               if (!pinfo)
+               if (!pinfo) {
+                       err = -ENOMEM;
                        goto out_free_stats;
+               }
                memcpy(pinfo, info, sizeof(*info));
                p->info = pinfo;
        }
index 93e7c1b32eddd5aa27fc8c96f5f581f712541a53..5610cd537da78812e2633d76ca90e5c3fb66e7cc 100644 (file)
@@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state,
                        continue;
                bsd_start = le32_to_cpu(p->p_offset);
                bsd_size = le32_to_cpu(p->p_size);
+               if (memcmp(flavour, "bsd\0", 4) == 0)
+                       bsd_start += offset;
                if (offset == bsd_start && size == bsd_size)
                        /* full parent partition, we have it already */
                        continue;
index d3a989e718f53518bafe93ddb9efea419e5d9b30..3cd6e12cfc467d27ccf2830fffde82e1aaf0f45e 100644 (file)
@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
         * signature and returns that to us.
         */
        ret = crypto_akcipher_verify(req);
-       if (ret == -EINPROGRESS) {
+       if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
                wait_for_completion(&compl.completion);
                ret = compl.err;
        }
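
Here and in the drbg/gcm hunks below, two things change together: -EBUSY joins -EINPROGRESS as a "result arrives later" code (a backlogged request returns -EBUSY yet still completes), and the interruptible waits become plain wait_for_completion(). The latter matters because a signal-aborted wait would abandon a request the driver will still complete, touching a completion object that may already be gone. The resulting canonical shape, restated from this hunk:

    ret = crypto_akcipher_verify(req);
    if (ret == -EINPROGRESS || ret == -EBUSY) {
            /* Must not be interruptible: the driver will signal the
             * completion eventually in both cases. */
            wait_for_completion(&compl.completion);
            ret = compl.err;
    }
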
index fa749f47013508d562366fb0484e7ea80535ba0a..cdb27ac4b2266eccff2ba89a5388ec2fbf6d18bc 100644 (file)
@@ -1767,9 +1767,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
                        break;
                case -EINPROGRESS:
                case -EBUSY:
-                       ret = wait_for_completion_interruptible(
-                               &drbg->ctr_completion);
-                       if (!ret && !drbg->ctr_async_err) {
+                       wait_for_completion(&drbg->ctr_completion);
+                       if (!drbg->ctr_async_err) {
                                reinit_completion(&drbg->ctr_completion);
                                break;
                        }
index b7ad808be3d4ec6c3822ce2cc5c0428d8f3b3dd0..3841b5eafa7ee244f605c28fd56c5a8c5dcaba9b 100644 (file)
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 
        err = crypto_skcipher_encrypt(&data->req);
        if (err == -EINPROGRESS || err == -EBUSY) {
-               err = wait_for_completion_interruptible(
-                       &data->result.completion);
-               if (!err)
-                       err = data->result.err;
+               wait_for_completion(&data->result.completion);
+               err = data->result.err;
        }
 
        if (err)
index 014af741fc6a3d78c769baa725774be7d9815851..4faa0fd53b0c120d39022ad726dbbe2c74f787bd 100644 (file)
@@ -764,6 +764,44 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
        return 0;
 }
 
+static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
+                                    const u8 *key, unsigned int keylen)
+{
+       unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+       struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+       u8 *buffer, *alignbuffer;
+       unsigned long absize;
+       int ret;
+
+       absize = keylen + alignmask;
+       buffer = kmalloc(absize, GFP_ATOMIC);
+       if (!buffer)
+               return -ENOMEM;
+
+       alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+       memcpy(alignbuffer, key, keylen);
+       ret = cipher->setkey(tfm, alignbuffer, keylen);
+       kzfree(buffer);
+       return ret;
+}
+
+static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                          unsigned int keylen)
+{
+       struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+       unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+
+       if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
+               crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+
+       if ((unsigned long)key & alignmask)
+               return skcipher_setkey_unaligned(tfm, key, keylen);
+
+       return cipher->setkey(tfm, key, keylen);
+}
+
 static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
 {
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
@@ -784,7 +822,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
            tfm->__crt_alg->cra_type == &crypto_givcipher_type)
                return crypto_init_skcipher_ops_ablkcipher(tfm);
 
-       skcipher->setkey = alg->setkey;
+       skcipher->setkey = skcipher_setkey;
        skcipher->encrypt = alg->encrypt;
        skcipher->decrypt = alg->decrypt;
        skcipher->ivsize = alg->ivsize;
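
The new skcipher_setkey_unaligned() above relies on the standard alignmask idiom: the mask is the required alignment minus one, so over-allocating by alignmask bytes guarantees some address in the buffer satisfies the alignment, and ALIGN() rounds the raw pointer up to it. A sketch of the arithmetic under that assumption (align_key_copy is a made-up helper, not a kernel function):

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static u8 *align_key_copy(const u8 *key, unsigned int keylen,
				  unsigned long alignmask, u8 **to_free)
	{
		/* worst case the raw pointer is alignmask bytes short of alignment */
		u8 *buffer = kmalloc(keylen + alignmask, GFP_ATOMIC);
		u8 *aligned;

		if (!buffer)
			return NULL;

		/* e.g. alignmask = 15: round up to the next 16-byte boundary */
		aligned = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
		memcpy(aligned, key, keylen);

		*to_free = buffer;	/* free (and zero) this, not the aligned pointer */
		return aligned;
	}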
index 5a968a78652bd23d269fbc871957a4488fb03af6..7abe6650573950674ce41a7511fd9abe2736c451 100644 (file)
@@ -418,11 +418,7 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
 
        table_desc->validation_count++;
        if (table_desc->validation_count == 0) {
-               ACPI_ERROR((AE_INFO,
-                           "Table %p, Validation count is zero after increment\n",
-                           table_desc));
                table_desc->validation_count--;
-               return_ACPI_STATUS(AE_LIMIT);
        }
 
        *out_table = table_desc->pointer;
index a9a9ab3399d47ff8087e62d495f1d2ba930fc1d3..d42eeef9d9287815ce5f4c82d8d915ae5deabe51 100644 (file)
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
        if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
            (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
             (battery->capacity_now <= battery->alarm)))
-               pm_wakeup_hard_event(&battery->device->dev);
+               pm_wakeup_event(&battery->device->dev, 0);
 
        return result;
 }
index b7c2a06963d6fb79cb5f5aaa2576936d17d5088f..e19f530f1083a13732328516925e3bbeb6493e14 100644 (file)
@@ -57,6 +57,7 @@
 
 #define ACPI_BUTTON_LID_INIT_IGNORE    0x00
 #define ACPI_BUTTON_LID_INIT_OPEN      0x01
+#define ACPI_BUTTON_LID_INIT_METHOD    0x02
 
 #define _COMPONENT             ACPI_BUTTON_COMPONENT
 ACPI_MODULE_NAME("button");
@@ -112,7 +113,7 @@ struct acpi_button {
 
 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
 static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
 
 static unsigned long lid_report_interval __read_mostly = 500;
 module_param(lid_report_interval, ulong, 0644);
@@ -216,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
        }
 
        if (state)
-               pm_wakeup_hard_event(&device->dev);
+               pm_wakeup_event(&device->dev, 0);
 
        ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
        if (ret == NOTIFY_DONE)
@@ -376,6 +377,9 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
        case ACPI_BUTTON_LID_INIT_OPEN:
                (void)acpi_lid_notify_state(device, 1);
                break;
+       case ACPI_BUTTON_LID_INIT_METHOD:
+               (void)acpi_lid_update_state(device);
+               break;
        case ACPI_BUTTON_LID_INIT_IGNORE:
        default:
                break;
@@ -398,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
                } else {
                        int keycode;
 
-                       pm_wakeup_hard_event(&device->dev);
+                       pm_wakeup_event(&device->dev, 0);
                        if (button->suspended)
                                break;
 
@@ -530,7 +534,6 @@ static int acpi_button_add(struct acpi_device *device)
                lid_device = device;
        }
 
-       device_init_wakeup(&device->dev, true);
        printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
        return 0;
 
@@ -560,6 +563,9 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
        if (!strncmp(val, "open", sizeof("open") - 1)) {
                lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
                pr_info("Notify initial lid state as open\n");
+       } else if (!strncmp(val, "method", sizeof("method") - 1)) {
+               lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+               pr_info("Notify initial lid state with _LID return value\n");
        } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) {
                lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE;
                pr_info("Do not notify initial lid state\n");
@@ -573,6 +579,8 @@ static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
        switch (lid_init_state) {
        case ACPI_BUTTON_LID_INIT_OPEN:
                return sprintf(buffer, "open");
+       case ACPI_BUTTON_LID_INIT_METHOD:
+               return sprintf(buffer, "method");
        case ACPI_BUTTON_LID_INIT_IGNORE:
                return sprintf(buffer, "ignore");
        default:
index 798d5003a039d876f275fc2d933be71cb7ebfbed..993fd31394c854c99e5ce0c2af824f36c50b7a22 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/pm_qos.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
-#include <linux/suspend.h>
 
 #include "internal.h"
 
@@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
        mutex_lock(&acpi_pm_notifier_lock);
 
        if (adev->wakeup.flags.notifier_present) {
-               pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
+               __pm_wakeup_event(adev->wakeup.ws, 0);
                if (adev->wakeup.context.work.func)
                        queue_pm_work(&adev->wakeup.context.work);
        }
index 3ba1c3472cf9e293ae70fd7cdc649eeb96abf1a0..fd86bec98dea37f0fdc7afbfdcf94d79227e8a44 100644 (file)
@@ -26,7 +26,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
        struct nfit_spa *nfit_spa;
 
        /* We only care about memory errors */
-       if (!(mce->status & MCACOD))
+       if (!mce_is_memory_error(mce))
                return NOTIFY_DONE;
 
        /*
index a6574d62634031ac6e351418b935a333b556b665..097d630ab8867267326121f9f4db2525cf06ef4b 100644 (file)
@@ -663,40 +663,14 @@ static int acpi_freeze_prepare(void)
        acpi_os_wait_events_complete();
        if (acpi_sci_irq_valid())
                enable_irq_wake(acpi_sci_irq);
-
        return 0;
 }
 
-static void acpi_freeze_wake(void)
-{
-       /*
-        * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
-        * that the SCI has triggered while suspended, so cancel the wakeup in
-        * case it has not been a wakeup event (the GPEs will be checked later).
-        */
-       if (acpi_sci_irq_valid() &&
-           !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
-               pm_system_cancel_wakeup();
-}
-
-static void acpi_freeze_sync(void)
-{
-       /*
-        * Process all pending events in case there are any wakeup ones.
-        *
-        * The EC driver uses the system workqueue, so that one needs to be
-        * flushed too.
-        */
-       acpi_os_wait_events_complete();
-       flush_scheduled_work();
-}
-
 static void acpi_freeze_restore(void)
 {
        acpi_disable_wakeup_devices(ACPI_STATE_S0);
        if (acpi_sci_irq_valid())
                disable_irq_wake(acpi_sci_irq);
-
        acpi_enable_all_runtime_gpes();
 }
 
@@ -708,8 +682,6 @@ static void acpi_freeze_end(void)
 static const struct platform_freeze_ops acpi_freeze_ops = {
        .begin = acpi_freeze_begin,
        .prepare = acpi_freeze_prepare,
-       .wake = acpi_freeze_wake,
-       .sync = acpi_freeze_sync,
        .restore = acpi_freeze_restore,
        .end = acpi_freeze_end,
 };
index 1b5ee1e0e5a3073457b0b15da34aadf292a544ba..e414fabf73158d77fba356be2c10354647e81a44 100644 (file)
@@ -333,14 +333,17 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
            container_of(bin_attr, struct acpi_table_attr, attr);
        struct acpi_table_header *table_header = NULL;
        acpi_status status;
+       ssize_t rc;
 
        status = acpi_get_table(table_attr->name, table_attr->instance,
                                &table_header);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
-       return memory_read_from_buffer(buf, count, &offset,
-                                      table_header, table_header->length);
+       rc = memory_read_from_buffer(buf, count, &offset, table_header,
+                       table_header->length);
+       acpi_put_table(table_header);
+       return rc;
 }
 
 static int acpi_table_attr_init(struct kobject *tables_obj,
index 2fc52407306c15c27b9fb0b11c2db4ef4641aeba..c69954023c2e7d8c235aace4b66a1d32298f36eb 100644 (file)
@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 {}
 #endif
 
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or are detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+                                   struct pci_dev *pdev)
+{
+       static const struct dmi_system_id sysids[] = {
+               {
+                       .ident = "Acer Switch Alpha 12",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+                       },
+               },
+               { }
+       };
+
+       if (dmi_check_system(sysids)) {
+               dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
+               if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+                       hpriv->port_map = 0x7;
+                       hpriv->cap = 0xC734FF02;
+               }
+       }
+}
+
 #ifdef CONFIG_ARM64
 /*
  * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                         "online status unreliable, applying workaround\n");
        }
 
+
+       /* Acer SA5-271 workaround modifies private_data */
+       acer_sa5_271_workaround(hpriv, pdev);
+
        /* CAP.NP sometimes indicate the index of the last enabled
         * port, at other times, that of the last possible port, so
         * determining the maximum port number requires looking at
index aaa761b9081cc02a75792302c741f7b54ebd9823..cd2eab6aa92ea245e1a3dab839be7fe8aa938cdb 100644 (file)
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
 
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
-               dev_err(dev, "no irq\n");
-               return -EINVAL;
+               if (irq != -EPROBE_DEFER)
+                       dev_err(dev, "no irq\n");
+               return irq;
        }
 
        hpriv->irq = irq;
index 2d83b8c7596567a020300d8dd1aeefeb33a6a055..e157a0e4441916b77b53c402741b74e124012cc9 100644 (file)
@@ -6800,7 +6800,7 @@ static int __init ata_parse_force_one(char **cur,
        }
 
        force_ent->port = simple_strtoul(id, &endp, 10);
-       if (p == endp || *endp != '\0') {
+       if (id == endp || *endp != '\0') {
                *reason = "invalid port/link";
                return -EINVAL;
        }
index b66bcda88320fefa399ac9653eca64d3045a6a96..3b2246dded74fbeed89d53f913c725ab6e5c0082 100644 (file)
@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev)
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        struct resource *res;
-       void __iomem *mmio;
        int n_ports = 0, irq = 0;
        int rc;
        int port;
@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev)
         * Get the register base first
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       mmio = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(mmio))
-               return PTR_ERR(mmio);
+       if (res == NULL)
+               return -EINVAL;
 
        /* allocate host */
        if (pdev->dev.of_node) {
@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev)
        hpriv->board_idx = chip_soc;
 
        host->iomap = NULL;
-       hpriv->base = mmio - SATAHC0_REG_BASE;
+       hpriv->base = devm_ioremap(&pdev->dev, res->start,
+                                  resource_size(res));
+       if (!hpriv->base)
+               return -ENOMEM;
+
+       hpriv->base -= SATAHC0_REG_BASE;
 
        hpriv->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(hpriv->clk))
index 5d38245a7a73a7cc4fa0d49255a52c7daead7886..b7939a2c1fab53ff2a94799a261d401c1c440f2e 100644 (file)
@@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to get access to sata clock\n");
                return PTR_ERR(priv->clk);
        }
-       clk_prepare_enable(priv->clk);
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        host = ata_host_alloc(&pdev->dev, 1);
        if (!host) {
@@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev)
        struct ata_host *host = dev_get_drvdata(dev);
        struct sata_rcar_priv *priv = host->private_data;
        void __iomem *base = priv->base;
+       int ret;
 
-       clk_prepare_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        /* ack and mask */
        iowrite32(0, base + SATAINTSTAT_REG);
@@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
        struct sata_rcar_priv *priv = host->private_data;
+       int ret;
 
-       clk_prepare_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        sata_rcar_setup_port(host);
 
index e987a6f55d36747f79b470b0372a1abcff6390d7..9faee1c893e53c8dea6e14d472a73a8b7131bf96 100644 (file)
@@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
        if (async_error)
                goto Complete;
 
+       if (pm_wakeup_pending()) {
+               async_error = -EBUSY;
+               goto Complete;
+       }
+
        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;
 
index f62082fdd6703e11c7576dc5db673dbc7dbba056..c313b600d356260fd9b98fe4848340f9cbdcf9ae 100644 (file)
@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
 /* First wakeup IRQ seen by the kernel in the last cycle. */
 unsigned int pm_wakeup_irq __read_mostly;
 
-/* If greater than 0 and the system is suspending, terminate the suspend. */
-static atomic_t pm_abort_suspend __read_mostly;
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
 
 /*
  * Combined counters of registered wakeup events and wakeup events in progress.
@@ -512,13 +512,12 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
 /**
 * wakeup_source_activate - Mark given wakeup source as active.
  * @ws: Wakeup source to handle.
- * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
  *
  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 * core of the event by incrementing the counter of wakeup events being
  * processed.
  */
-static void wakeup_source_activate(struct wakeup_source *ws, bool hard)
+static void wakeup_source_activate(struct wakeup_source *ws)
 {
        unsigned int cec;
 
@@ -526,9 +525,6 @@ static void wakeup_source_activate(struct wakeup_source *ws, bool hard)
                        "unregistered wakeup source\n"))
                return;
 
-       if (hard)
-               pm_system_wakeup();
-
        ws->active = true;
        ws->active_count++;
        ws->last_time = ktime_get();
@@ -554,7 +550,10 @@ static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
                ws->wakeup_count++;
 
        if (!ws->active)
-               wakeup_source_activate(ws, hard);
+               wakeup_source_activate(ws);
+
+       if (hard)
+               pm_system_wakeup();
 }
 
 /**
@@ -856,26 +855,20 @@ bool pm_wakeup_pending(void)
                pm_print_active_wakeup_sources();
        }
 
-       return ret || atomic_read(&pm_abort_suspend) > 0;
+       return ret || pm_abort_suspend;
 }
 
 void pm_system_wakeup(void)
 {
-       atomic_inc(&pm_abort_suspend);
+       pm_abort_suspend = true;
        freeze_wake();
 }
 EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
-void pm_system_cancel_wakeup(void)
-{
-       atomic_dec(&pm_abort_suspend);
-}
-
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(void)
 {
+       pm_abort_suspend = false;
        pm_wakeup_irq = 0;
-       if (reset)
-               atomic_set(&pm_abort_suspend, 0);
 }
 
 void pm_system_irq_wakeup(unsigned int irq_number)
index 9a7bb2c2944772cad8124a965bacc17d0aa8f935..f3f191ba8ca4bbe6b7d87a7accc84bd648e4d718 100644 (file)
@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
        return -ENOSPC;
 }
 
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-       nbd->config = NULL;
-       nbd->tag_set.timeout = 0;
-       queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
        if (bdev->bd_openers > 1)
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
                        }
                        kfree(config->socks);
                }
-               nbd_reset(nbd);
+               kfree(nbd->config);
+               nbd->config = NULL;
+
+               nbd->tag_set.timeout = 0;
+               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
                mutex_unlock(&nbd->config_lock);
                nbd_put(nbd);
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
        disk->fops = &nbd_fops;
        disk->private_data = nbd;
        sprintf(disk->disk_name, "nbd%d", index);
-       nbd_reset(nbd);
        add_disk(disk);
        nbd_total_devices++;
        return index;
index 454bf9c34882f33d673ccbaf0c8afa4f3ee18ad4..c16f74547804ccb957275f6d59b705b0ba35eb6b 100644 (file)
@@ -4023,6 +4023,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 
        switch (req_op(rq)) {
        case REQ_OP_DISCARD:
+       case REQ_OP_WRITE_ZEROES:
                op_type = OBJ_OP_DISCARD;
                break;
        case REQ_OP_WRITE:
@@ -4420,6 +4421,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        q->limits.discard_granularity = segment_size;
        q->limits.discard_alignment = segment_size;
        blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
+       blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
 
        if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
                q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
index d4dbd8d8e524d7b712f9668cbee57c7d722440b2..382c864814d944c79e610eaa434bc356d12bd335 100644 (file)
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 
        rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
        if (rc <= 0) {
-               DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+               DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
                DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                if (rc == -ERESTARTSYS)
                        return rc;
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
        for (i = 0; i < bytes_to_write; i++) {
                rc = wait_for_bulk_out_ready(dev);
                if (rc <= 0) {
-                       DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+                       DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
                               rc);
                        DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                        if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
        rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
 
        if (rc <= 0) {
-               DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+               DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
                DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                if (rc == -ERESTARTSYS)
                        return rc;
index 0ab0249189072befe3cee1b8696052727f360540..a561f0c2f428df6cbd80e0fe5bfd3c479a578e18 100644 (file)
@@ -1097,12 +1097,16 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
        __u32 *ptr = (__u32 *) regs;
+       unsigned long flags;
 
        if (regs == NULL)
                return 0;
+       local_irq_save(flags);
        if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
                f->reg_idx = 0;
-       return *(ptr + f->reg_idx++);
+       ptr += f->reg_idx++;
+       local_irq_restore(flags);
+       return *ptr;
 }
 
 void add_interrupt_randomness(int irq, int irq_flags)
index 74ed7e9a7f27cd333a6bdf48144c0de4e6e7469e..2011fec2d6ad9d5b4ad1b089c38a1be23baeb509 100644 (file)
@@ -71,6 +71,15 @@ config ARM_HIGHBANK_CPUFREQ
 
          If in doubt, say N.
 
+config ARM_DB8500_CPUFREQ
+       tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500
+       default ARCH_U8500
+       depends on HAS_IOMEM
+       depends on !CPU_THERMAL || THERMAL
+       help
+         This adds the CPUFreq driver for the ST-Ericsson Ux500 (DB8500) SoC
+         series.
+
 config ARM_IMX6Q_CPUFREQ
        tristate "Freescale i.MX6 cpufreq support"
        depends on ARCH_MXC
index b7e78f063c4f1d373bc892f84f68d6b8000a3051..ab3a42cd29ef210bcf0cad2ee48c74cb954b14f2 100644 (file)
@@ -53,7 +53,7 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ)               += arm_big_little_dt.o
 
 obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ)  += brcmstb-avs-cpufreq.o
 obj-$(CONFIG_ARCH_DAVINCI)             += davinci-cpufreq.o
-obj-$(CONFIG_UX500_SOC_DB8500)         += dbx500-cpufreq.o
+obj-$(CONFIG_ARM_DB8500_CPUFREQ)       += dbx500-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)   += exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)     += highbank-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)                += imx6q-cpufreq.o
index 0e3f6496524d92c7c1717d8d2259684952d7acb8..26b643d57847de0fca4afc099f4bec49d6326be9 100644 (file)
@@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
            list_empty(&cpufreq_policy_list)) {
                /* if all ->init() calls failed, unregister */
+               ret = -ENODEV;
                pr_debug("%s: No CPU initialized for driver %s\n", __func__,
                         driver_data->name);
                goto err_if_unreg;
index b7de5bd76a31743f52cc9095845495e596d0817d..eb1158532de31e7aee418162135a7495b10f9860 100644 (file)
@@ -571,9 +571,10 @@ static inline void update_turbo_state(void)
 static int min_perf_pct_min(void)
 {
        struct cpudata *cpu = all_cpu_data[0];
+       int turbo_pstate = cpu->pstate.turbo_pstate;
 
-       return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
-                           cpu->pstate.turbo_pstate);
+       return turbo_pstate ?
+               DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
 }
 
 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
index 1b9bcd76c60e334e72a31a3b6ff1a21410ac23ff..c2dd43f3f5d8a3092e6847f18d124f0631bdf065 100644 (file)
@@ -127,7 +127,12 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                return PTR_ERR(priv.cpu_clk);
        }
 
-       clk_prepare_enable(priv.cpu_clk);
+       err = clk_prepare_enable(priv.cpu_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare cpuclk\n");
+               return err;
+       }
+
        kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
 
        priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
@@ -137,7 +142,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                goto out_cpu;
        }
 
-       clk_prepare_enable(priv.ddr_clk);
+       err = clk_prepare_enable(priv.ddr_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare ddrclk\n");
+               goto out_cpu;
+       }
        kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
 
        priv.powersave_clk = of_clk_get_by_name(np, "powersave");
@@ -146,7 +155,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                err = PTR_ERR(priv.powersave_clk);
                goto out_ddr;
        }
-       clk_prepare_enable(priv.powersave_clk);
+       err = clk_prepare_enable(priv.powersave_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare powersave clk\n");
+               goto out_ddr;
+       }
 
        of_node_put(np);
        np = NULL;
index d37e8dda807900fe9725aa153c20c1c2bc927a52..ec240592f5c8e7a450e2c26c20feece9d12dafad 100644 (file)
@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
        struct dma_device       dma_dev;
        bool                    m2m;
        int                     (*hw_setup)(struct ep93xx_dma_chan *);
+       void                    (*hw_synchronize)(struct ep93xx_dma_chan *);
        void                    (*hw_shutdown)(struct ep93xx_dma_chan *);
        void                    (*hw_submit)(struct ep93xx_dma_chan *);
        int                     (*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
                | M2P_CONTROL_ENABLE;
        m2p_set_control(edmac, control);
 
+       edmac->buffer = 0;
+
        return 0;
 }
 
@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
        return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }
 
-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+       unsigned long flags;
        u32 control;
 
+       spin_lock_irqsave(&edmac->lock, flags);
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);
+       spin_unlock_irqrestore(&edmac->lock, flags);
 
        while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-               cpu_relax();
+               schedule();
+}
 
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
        m2p_set_control(edmac, 0);
 
-       while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-               cpu_relax();
+       while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+               dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1160,6 +1169,26 @@ fail:
        return NULL;
 }
 
+/**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+       if (edmac->edma->hw_synchronize)
+               edmac->edma->hw_synchronize(edmac);
+}
+
 /**
  * ep93xx_dma_terminate_all - terminate all transactions
  * @chan: channel
@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
        dma_dev->device_config = ep93xx_dma_slave_config;
+       dma_dev->device_synchronize = ep93xx_dma_synchronize;
        dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
        dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
        dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        } else {
                dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
 
+               edma->hw_synchronize = m2p_hw_synchronize;
                edma->hw_setup = m2p_hw_setup;
                edma->hw_shutdown = m2p_hw_shutdown;
                edma->hw_submit = m2p_hw_submit;
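
The hw_synchronize split above exists so that ep93xx_dma_terminate_all() stays callable from atomic context while the blocking channel drain moves behind the generic device_synchronize hook. On the consumer side this pairs with the dmaengine helpers; a sketch, with my_stop_channel as an illustrative caller:

	#include <linux/dmaengine.h>

	static void my_stop_channel(struct dma_chan *chan)
	{
		/* non-blocking: request termination; descriptors may still be in flight */
		dmaengine_terminate_async(chan);

		/* may sleep: afterwards no completion callback is running and the
		 * memory behind the submitted descriptors may be freed */
		dmaengine_synchronize(chan);
	}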
index a28a01fcba674dc569e4d49ca6fd50def5a58645..f3e211f8f6c58c00080703f11b25937bb36dab39 100644 (file)
@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
        struct mv_xor_v2_sw_desc *sw_desq;
        int desc_size;
        unsigned int npendings;
+       unsigned int hw_queue_idx;
 };
 
 /**
@@ -213,18 +214,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
        }
 }
 
-/*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-       /* read the index for the next available descriptor in the DESQ */
-       u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-       return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-               & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
 /*
  * notify the engine of new descriptors, and update the available index.
  */
@@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
        return MV_XOR_V2_EXT_DESC_SIZE;
 }
 
-/*
- * Set the IMSG threshold
- */
-static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
-{
-       u32 reg;
-
-       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-
-       reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-       reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-
-       writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-}
-
 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 {
        struct mv_xor_v2_device *xor_dev = data;
@@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
        if (!ndescs)
                return IRQ_NONE;
 
-       /*
-        * Update IMSG threshold, to disable new IMSG interrupts until
-        * end of the tasklet
-        */
-       mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
        /* schedule a tasklet to handle descriptors callbacks */
        tasklet_schedule(&xor_dev->irq_tasklet);
 
@@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-       int desq_ptr;
        void *dest_hw_desc;
        dma_cookie_t cookie;
        struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
        spin_lock_bh(&xor_dev->lock);
        cookie = dma_cookie_assign(tx);
 
-       /* get the next available slot in the DESQ */
-       desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
        /* copy the HW descriptor from the SW descriptor to the DESQ */
-       dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+       dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
 
        memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
 
        xor_dev->npendings++;
+       xor_dev->hw_queue_idx++;
+       if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+               xor_dev->hw_queue_idx = 0;
 
        spin_unlock_bh(&xor_dev->lock);
 
@@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc     *
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
        struct mv_xor_v2_sw_desc *sw_desc;
+       bool found = false;
 
        /* Lock the channel */
        spin_lock_bh(&xor_dev->lock);
@@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
                return NULL;
        }
 
-       /* get a free SW descriptor from the SW DESQ */
-       sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-                                  struct mv_xor_v2_sw_desc, free_list);
+       list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+               if (async_tx_test_ack(&sw_desc->async_tx)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               spin_unlock_bh(&xor_dev->lock);
+               return NULL;
+       }
+
        list_del(&sw_desc->free_list);
 
        /* Release the channel */
        spin_unlock_bh(&xor_dev->lock);
 
-       /* set the async tx descriptor */
-       dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-       sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-       async_tx_ack(&sw_desc->async_tx);
-
        return sw_desc;
 }
 
@@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                __func__, len, &src, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                __func__, src_cnt, len, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
                container_of(chan, struct mv_xor_v2_device, dmachan);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;
@@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
 {
        struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
        int pending_ptr, num_of_pending, i;
-       struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
        struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
 
        dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
        /* get the pending descriptors parameters */
        num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
 
-       /* next HW descriptor */
-       next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
        /* loop over free descriptors */
        for (i = 0; i < num_of_pending; i++) {
-
-               if (pending_ptr > MV_XOR_V2_DESC_NUM)
-                       pending_ptr = 0;
-
-               if (next_pending_sw_desc != NULL)
-                       next_pending_hw_desc++;
+               struct mv_xor_v2_descriptor *next_pending_hw_desc =
+                       xor_dev->hw_desq_virt + pending_ptr;
 
                /* get the SW descriptor related to the HW descriptor */
                next_pending_sw_desc =
@@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
 
                /* increment the next descriptor */
                pending_ptr++;
+               if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+                       pending_ptr = 0;
        }
 
        if (num_of_pending != 0) {
                /* free the descriptors */
                mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
        }
-
-       /* Update IMSG threshold, to enable new IMSG interrupts */
-       mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }
 
 /*
@@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
 
-       /* enable the DMA engine */
-       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
        /*
         * This is a temporary solution, until we activate the
         * SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
 
+       /* enable the DMA engine */
+       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
        return 0;
 }
 
@@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, xor_dev);
 
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+       if (ret)
+               return ret;
+
        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
@@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        /* add all SW descriptors to the free list */
        for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-               xor_dev->sw_desq[i].idx = i;
-               list_add(&xor_dev->sw_desq[i].free_list,
+               struct mv_xor_v2_sw_desc *sw_desc =
+                       xor_dev->sw_desq + i;
+               sw_desc->idx = i;
+               dma_async_tx_descriptor_init(&sw_desc->async_tx,
+                                            &xor_dev->dmachan);
+               sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+               async_tx_ack(&sw_desc->async_tx);
+
+               list_add(&sw_desc->free_list,
                         &xor_dev->free_sw_desc);
        }
 
index 8b0da7fa520d27ac514228130c354f121a12b848..e90a7a0d760af6d031fa465208e88f2dd6f056b4 100644 (file)
@@ -3008,7 +3008,8 @@ static int pl330_remove(struct amba_device *adev)
 
        for (i = 0; i < AMBA_NR_IRQS; i++) {
                irq = adev->irq[i];
-               devm_free_irq(&adev->dev, irq, pl330);
+               if (irq)
+                       devm_free_irq(&adev->dev, irq, pl330);
        }
 
        dma_async_device_unregister(&pl330->ddma);
index db41795fe42ae6ed355de41f12b5c90ea661bde4..bd261c9e9664b6ac951939641091bc0bb7466380 100644 (file)
@@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        if (desc->hwdescs.use) {
                dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
                        RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+               if (dptr == 0)
+                       dptr = desc->nchunks;
+               dptr--;
                WARN_ON(dptr >= desc->nchunks);
        } else {
                running = desc->running;
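
The rcar-dmac hunk above reads as a pure off-by-one, but the underlying assumption is worth spelling out: DMACHCRB.DPTR holds the index of the next descriptor chunk and wraps to 0 past the last of nchunks, so the chunk currently in flight is (dptr ? dptr : nchunks) - 1. A worked sketch under that assumption:

	static unsigned int running_chunk(unsigned int dptr, unsigned int nchunks)
	{
		if (dptr == 0)
			dptr = nchunks;	/* wrapped: the last chunk is in flight */

		return dptr - 1;	/* e.g. dptr == 0, nchunks == 4 -> chunk 3 */
	}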
index 72c649713aceecd75a20957522792702fca1696a..31a145154e9f26a8562e51223cffa50e65352075 100644 (file)
@@ -117,7 +117,7 @@ struct usb_dmac {
 #define USB_DMASWR                     0x0008
 #define USB_DMASWR_SWR                 (1 << 0)
 #define USB_DMAOR                      0x0060
-#define USB_DMAOR_AE                   (1 << 2)
+#define USB_DMAOR_AE                   (1 << 1)
 #define USB_DMAOR_DME                  (1 << 0)
 
 #define USB_DMASAR                     0x0000
index 44c01390d0353fd3170fc797eb4ce6393229bd14..dc269cb288c209d60e780eff287af2930fb4c477 100644 (file)
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0400, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(board_version,       0444, DMI_BOARD_VERSION);
@@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_version,   DMI_PRODUCT_VERSION);
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
+       ADD_DMI_ATTR(product_family,      DMI_PRODUCT_FAMILY);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
index 54be60ead08f8068c18dc9bbfd5a40e3cb26685c..93f7acdaac7ac19c057fc6b98a76c270a4646f24 100644 (file)
@@ -430,6 +430,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
                dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
                dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
                dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+               dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
                break;
        case 2:         /* Base Board Information */
                dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
index 04ca8764f0c096f4e3f006ab74e4dc55996735a1..8bf27323f7a37c34591c45f8b39d2091ae096260 100644 (file)
@@ -36,6 +36,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
        if (acpi_disabled)
                return;
 
+       if (!efi_enabled(EFI_BOOT))
+               return;
+
        if (table->length < sizeof(bgrt_tab)) {
                pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
                       table->length, sizeof(bgrt_tab));
index ab3a951a17e6c182dfa1c68b579ee003b3c1c44a..ef1fafdad4008ae7fa1824396592e0e53cf61002 100644 (file)
@@ -53,6 +53,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
        if (sscanf(name, "dump-type%u-%u-%d-%lu-%c",
                   &record->type, &part, &cnt, &time, &data_type) == 5) {
                record->id = generic_id(time, part, cnt);
+               record->part = part;
                record->count = cnt;
                record->time.tv_sec = time;
                record->time.tv_nsec = 0;
@@ -64,6 +65,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
        } else if (sscanf(name, "dump-type%u-%u-%d-%lu",
                   &record->type, &part, &cnt, &time) == 4) {
                record->id = generic_id(time, part, cnt);
+               record->part = part;
                record->count = cnt;
                record->time.tv_sec = time;
                record->time.tv_nsec = 0;
@@ -77,6 +79,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
                 * multiple logs, remains.
                 */
                record->id = generic_id(time, part, 0);
+               record->part = part;
                record->count = 0;
                record->time.tv_sec = time;
                record->time.tv_nsec = 0;
@@ -241,9 +244,15 @@ static int efi_pstore_write(struct pstore_record *record)
        efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
        int i, ret = 0;
 
+       record->time.tv_sec = get_seconds();
+       record->time.tv_nsec = 0;
+
+       record->id = generic_id(record->time.tv_sec, record->part,
+                               record->count);
+
        snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu-%c",
                 record->type, record->part, record->count,
-                get_seconds(), record->compressed ? 'C' : 'D');
+                record->time.tv_sec, record->compressed ? 'C' : 'D');
 
        for (i = 0; i < DUMP_NAME_LEN; i++)
                efi_name[i] = name[i];
@@ -255,7 +264,6 @@ static int efi_pstore_write(struct pstore_record *record)
        if (record->reason == KMSG_DUMP_OOPS)
                efivar_run_worker();
 
-       record->id = record->part;
        return ret;
 };
 
@@ -287,7 +295,7 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
                 * holding multiple logs, remains.
                 */
                snprintf(name_old, sizeof(name_old), "dump-type%u-%u-%lu",
-                       ed->record->type, (unsigned int)ed->record->id,
+                       ed->record->type, ed->record->part,
                        ed->record->time.tv_sec);
 
                for (i = 0; i < DUMP_NAME_LEN; i++)
@@ -320,10 +328,7 @@ static int efi_pstore_erase(struct pstore_record *record)
        char name[DUMP_NAME_LEN];
        efi_char16_t efi_name[DUMP_NAME_LEN];
        int found, i;
-       unsigned int part;
 
-       do_div(record->id, 1000);
-       part = do_div(record->id, 100);
        snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu",
                 record->type, record->part, record->count,
                 record->time.tv_sec);
index 8c34d50a4d8032bbaba3322b3dee4ff22826a923..959777ec8a77bab62e49097cd93d711ccd311610 100644 (file)
 
 /* BIOS variables */
 static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
-static const efi_char16_t const efi_SecureBoot_name[] = {
+static const efi_char16_t efi_SecureBoot_name[] = {
        'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0
 };
-static const efi_char16_t const efi_SetupMode_name[] = {
+static const efi_char16_t efi_SetupMode_name[] = {
        'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0
 };
 
index 236d9950221b62665e8728941faa5793fc757980..c0d8c6ff6380e8a69de8faf58d28963dde29c8de 100644 (file)
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 
 void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
 {
-       struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+       struct amdgpu_fbdev *afbdev;
        struct drm_fb_helper *fb_helper;
        int ret;
 
+       if (!adev)
+               return;
+
+       afbdev = adev->mode_info.rfbdev;
+
        if (!afbdev)
                return;
 
index 07ff3b1514f129edc23875c1f42053f7ef1aaa72..8ecf82c5fe74dc4d34e55d4dbaec5931bca1a2e8 100644 (file)
@@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
                mutex_unlock(&id_mgr->lock);
        }
 
-       if (gds_switch_needed) {
+       if (ring->funcs->emit_gds_switch && gds_switch_needed) {
                id->gds_base = job->gds_base;
                id->gds_size = job->gds_size;
                id->gws_base = job->gws_base;
@@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
        struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
 
+       atomic64_set(&id->owner, 0);
        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
@@ -680,6 +681,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
        id->oa_size = 0;
 }
 
+/**
+ * amdgpu_vm_reset_all_ids - reset all VMIDs to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset all VMIDs to force a flush on their next use.
+ */
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
+{
+       unsigned i, j;
+
+       for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+               struct amdgpu_vm_id_manager *id_mgr =
+                       &adev->vm_manager.id_mgr[i];
+
+               for (j = 1; j < id_mgr->num_ids; ++j)
+                       amdgpu_vm_reset_id(adev, i, j);
+       }
+}
+
 /**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
@@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                adev->vm_manager.seqno[i] = 0;
 
-
        atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
        atomic64_set(&adev->vm_manager.client_counter, 0);
        spin_lock_init(&adev->vm_manager.prt_lock);
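
amdgpu_vm_reset_all_ids() above is what lets the gmc_v6 through gmc_v9 hunks below drop the vm_manager teardown/re-init dance on suspend/resume: instead of rebuilding the ID manager, resume just forces every VMID to flush on its next grab. A sketch of the resulting resume shape (my_gmc_resume and my_gmc_hw_init are illustrative; the real callbacks follow below):

	static int my_gmc_resume(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
		int r;

		r = my_gmc_hw_init(adev);	/* re-program the memory controller */
		if (r)
			return r;

		amdgpu_vm_reset_all_ids(adev);	/* stale VMIDs flush on next use */
		return 0;
	}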
index d97e28b4bdc41cbb52e70647685b58db4886514a..e1d951ece4333672512f96ae914ee740f0a5922b 100644 (file)
@@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
                        unsigned vmid);
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
index a4831fe0223bffebdda4727589453aa6cb8fb7b1..a2c59a08b2bd69919b23989feddff71a88855617 100644 (file)
@@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
 }
 
 const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
-       amdgpu_vram_mgr_init,
-       amdgpu_vram_mgr_fini,
-       amdgpu_vram_mgr_new,
-       amdgpu_vram_mgr_del,
-       amdgpu_vram_mgr_debug
+       .init           = amdgpu_vram_mgr_init,
+       .takedown       = amdgpu_vram_mgr_fini,
+       .get_node       = amdgpu_vram_mgr_new,
+       .put_node       = amdgpu_vram_mgr_del,
+       .debug          = amdgpu_vram_mgr_debug
 };
index 6dc1410b380f376982551dbebe06f4dd84edf3b2..ec93714e4524eeaf80dbd5181da396e48024827b 100644 (file)
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
        u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
+       /* disable mclk switching if the refresh is >120Hz, even if the
+        * blanking period would allow it
+        */
+       if (amdgpu_dpm_get_vrefresh(adev) > 120)
+               return true;
+
        if (vblank_time < switch_limit)
                return true;
        else
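
The ci_dpm check above layers a refresh-rate cutoff on top of the existing blanking-time one; assuming, per the rest of the driver, that vblank_time and switch_limit are in microseconds, the combined decision reduces to:

	static bool vblank_too_short(u32 vrefresh_hz, u32 vblank_time_us, bool gddr5)
	{
		u32 switch_limit = gddr5 ? 450 : 300;	/* microseconds, per the hunk */

		if (vrefresh_hz > 120)	/* high refresh: never risk an mclk switch */
			return true;

		return vblank_time_us < switch_limit;
	}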
index a572979f186cdaeba52701fb8183850e16581143..d860939152df234517e7806135015a23aa316df9 100644 (file)
@@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v6_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v6_0_hw_fini(adev);
 
        return 0;
@@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v6_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v6_0_is_idle(void *handle)
index a9083a16a250920c64605bc447ca19c8b3014f3a..2750e5c2381301ceebddb8f614f7e5868de23d2c 100644 (file)
@@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v7_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v7_0_hw_fini(adev);
 
        return 0;
@@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v7_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v7_0_is_idle(void *handle)
index 4ac99784160a3ed9fbb59c5bf53a8d1b0a6ec1b4..f56b4089ee9f3fe7581cd6c541d434867c613cbb 100644 (file)
@@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v8_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v8_0_hw_fini(adev);
 
        return 0;
@@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v8_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v8_0_is_idle(void *handle)
index dc1e1c1d6b2430cb9957047a454cde87bf439561..f936332a069d2d1c9329a3d52a17f9e44776f659 100644 (file)
@@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v9_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v9_0_hw_fini(adev);
 
        return 0;
@@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v9_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev,
-                               "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v9_0_is_idle(void *handle)
index fb08193599092d0d6562e0c5b4c019b6be45bed2..90332f55cfba91b7a543da4ec3820809bb876336 100644 (file)
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
+       u32 v;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(mmVCE_RB_RPTR);
+               v = RREG32(mmVCE_RB_RPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(mmVCE_RB_RPTR2);
+               v = RREG32(mmVCE_RB_RPTR2);
        else
-               return RREG32(mmVCE_RB_RPTR3);
+               v = RREG32(mmVCE_RB_RPTR3);
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return v;
 }
 
 /**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
+       u32 v;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(mmVCE_RB_WPTR);
+               v = RREG32(mmVCE_RB_WPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(mmVCE_RB_WPTR2);
+               v = RREG32(mmVCE_RB_WPTR2);
        else
-               return RREG32(mmVCE_RB_WPTR3);
+               v = RREG32(mmVCE_RB_WPTR3);
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return v;
 }
 
 /**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        else if (ring == &adev->vce.ring[1])
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        struct amdgpu_ring *ring;
        int idx, r;
 
-       ring = &adev->vce.ring[0];
-       WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
-       ring = &adev->vce.ring[1];
-       WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
-
-       ring = &adev->vce.ring[2];
-       WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
-
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
                WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+               /* Program the instance 0 reg space when both instances (or
+                * only instance 0) are present; program the instance 1 reg
+                * space when only instance 1 survives harvesting. */
+               if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
+                       ring = &adev->vce.ring[0];
+                       WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+                       ring = &adev->vce.ring[1];
+                       WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+                       ring = &adev->vce.ring[2];
+                       WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+               }
+
                vce_v3_0_mc_resume(adev, idx);
                WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
index a74a3db3056c9c4a991e2b4525eb09de8a234a0a..102eb6d029faeb27887215ada8aeccf93d4039b0 100644 (file)
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
        return sizeof(struct smu7_power_state);
 }
 
+static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+                                uint32_t vblank_time_us)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t switch_limit_us;
+
+       switch (hwmgr->chip_id) {
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
+               switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+               break;
+       default:
+               switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+               break;
+       }
+
+       if (vblank_time_us < switch_limit_us)
+               return true;
+       else
+               return false;
+}
 
 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                struct pp_power_state *request_ps,
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        bool disable_mclk_switching;
        bool disable_mclk_switching_for_frame_lock;
        struct cgs_display_info info = {0};
+       struct cgs_mode_info mode_info = {0};
        const struct phm_clock_and_voltage_limits *max_limits;
        uint32_t i;
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        int32_t count;
        int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
 
+       info.mode_info = &mode_info;
        data->battery_state = (PP_StateUILabel_Battery ==
                        request_ps->classification.ui_label);
 
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
        cgs_get_active_displays_info(hwmgr->device, &info);
 
-       /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
        minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
        minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
 
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-       disable_mclk_switching = (1 < info.display_count) ||
-                                   disable_mclk_switching_for_frame_lock;
+       disable_mclk_switching = ((1 < info.display_count) ||
+                                 disable_mclk_switching_for_frame_lock ||
+                                 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+                                 (mode_info.refresh_rate > 120));
 
        sclk = smu7_ps->performance_levels[0].engine_clock;
        mclk = smu7_ps->performance_levels[0].memory_clock;
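Worked numbers for the new check (values assumed for illustration): on Polaris with GDDR5 the switch limit is 190 us. A 60 Hz mode with roughly 500 us of vblank leaves enough time to retrain memory, so mclk switching stays allowed; a high-refresh mode with only ~120 us of vblank, or any refresh rate above 120 Hz, now forces disable_mclk_switching so the switch cannot land outside vblank and corrupt the screen.

	/* Polaris + GDDR5: switch_limit_us == 190 */
	smu7_vblank_too_short(hwmgr, 500);	/* false: switching allowed  */
	smu7_vblank_too_short(hwmgr, 120);	/* true: switching disabled  */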
index ad30f5d3a10d5ea2a0118203ff66110a11b0583a..2614af2f553f3007ae25cb70e1f8f322c23a62d6 100644 (file)
@@ -4186,7 +4186,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
        struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
-       uint32_t i;
+       int i;
 
        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                return -EINVAL;
index d5f53d04fa08c30a4053aa9fd07b04c795177e4e..83e40fe51b6212f6bbcf44b119c47a86b3fb1638 100644 (file)
@@ -709,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
 
 static struct phm_master_table_item
 vega10_thermal_start_thermal_controller_master_list[] = {
-       {NULL, tf_vega10_thermal_initialize},
-       {NULL, tf_vega10_thermal_set_temperature_range},
-       {NULL, tf_vega10_thermal_enable_alert},
+       { .tableFunction = tf_vega10_thermal_initialize },
+       { .tableFunction = tf_vega10_thermal_set_temperature_range },
+       { .tableFunction = tf_vega10_thermal_enable_alert },
 /* We should restrict performance levels to low before we halt the SMC.
  * On the other hand we are still in boot state when we do this
  * so it would be pointless.
  * If this assumption changes we have to revisit this table.
  */
-       {NULL, tf_vega10_thermal_setup_fan_table},
-       {NULL, tf_vega10_thermal_start_smc_fan_control},
-       {NULL, NULL}
+       { .tableFunction = tf_vega10_thermal_setup_fan_table },
+       { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
+       { }
 };
 
 static struct phm_master_table_header
@@ -731,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = {
 
 static struct phm_master_table_item
 vega10_thermal_set_temperature_range_master_list[] = {
-       {NULL, tf_vega10_thermal_disable_alert},
-       {NULL, tf_vega10_thermal_set_temperature_range},
-       {NULL, tf_vega10_thermal_enable_alert},
-       {NULL, NULL}
+       { .tableFunction = tf_vega10_thermal_disable_alert },
+       { .tableFunction = tf_vega10_thermal_set_temperature_range },
+       { .tableFunction = tf_vega10_thermal_enable_alert },
+       { }
 };
 
 struct phm_master_table_header
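The conversion above is purely notational: designated initializers name the one member being set, so the tables no longer depend on struct member order, and the bare { } entry reads unambiguously as the all-zero sentinel that ends the list. The same idiom in self-contained form (hypothetical types, for illustration only):

	struct step {
		const char *name;
		int (*fn)(void);
	};

	static int do_work(void) { return 0; }

	static const struct step steps[] = {
		{ .fn = do_work },	/* .name is implicitly NULL */
		{ }			/* all-zero sentinel terminates the walk */
	};

	/* consumers stop at the first entry whose members are all zero */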
index 8be9719284b047f3d6046fd6a22c80bb617fa0f7..aa885a614e27c9882ea597456c4b2e16fb25b62b 100644 (file)
@@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                bool has_connectors =
                        !!new_crtc_state->connector_mask;
 
+               WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
                if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
                        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
                                         crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
        for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
                const struct drm_connector_helper_funcs *funcs = connector->helper_private;
 
+               WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
                /*
                 * This only sets crtc->connectors_changed for routing changes,
                 * drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
 
+               WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+
                funcs = plane->helper_private;
 
                drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
 
        drm_modeset_acquire_init(&ctx, 0);
        while (1) {
+               err = drm_modeset_lock_all_ctx(dev, &ctx);
+               if (err)
+                       goto out;
+
                err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+out:
                if (err != -EDEADLK)
                        break;
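The hunk above only shows the middle of the standard modeset-lock acquire/backoff idiom; reconstructed for context (a sketch of the surrounding pattern, not extra patch content):

	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_modeset_acquire_init(&ctx, 0);
	while (1) {
		err = drm_modeset_lock_all_ctx(dev, &ctx);
		if (!err)
			err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
		if (err != -EDEADLK)
			break;
		drm_modeset_backoff(&ctx);	/* drop all locks, wait, retry */
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

Taking every lock up front is the actual fix here: the duplicated state spans all CRTCs, planes, and connectors, and the WARN_ON checks added earlier in this file now fire if the corresponding locks are not held during the commit checks.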
 
index 3e5f52110ea17384c84f568ba9b1a4955922523c..213fb837e1c40fe79bf536d54b083d99dee1c192 100644 (file)
@@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
        return 0;
 }
 EXPORT_SYMBOL(drm_dp_stop_crc);
+
+struct dpcd_quirk {
+       u8 oui[3];
+       bool is_branch;
+       u32 quirks;
+};
+
+#define OUI(first, second, third) { (first), (second), (third) }
+
+static const struct dpcd_quirk dpcd_quirk_list[] = {
+       /* Analogix 7737 needs reduced M and N at HBR2 link rates */
+       { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
+};
+
+#undef OUI
+
+/*
+ * Get a bit mask of DPCD quirks for the sink/branch device identified by
+ * ident. The quirk data is shared but it's up to the drivers to act on the
+ * data.
+ *
+ * For now, only the OUI (first three bytes) is used, but this may be extended
+ * to device identification string and hardware/firmware revisions later.
+ */
+static u32
+drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
+{
+       const struct dpcd_quirk *quirk;
+       u32 quirks = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
+               quirk = &dpcd_quirk_list[i];
+
+               if (quirk->is_branch != is_branch)
+                       continue;
+
+               if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
+                       continue;
+
+               quirks |= quirk->quirks;
+       }
+
+       return quirks;
+}
+
+/**
+ * drm_dp_read_desc - read sink/branch descriptor from DPCD
+ * @aux: DisplayPort AUX channel
+ * @desc: Device descriptor to fill from DPCD
+ * @is_branch: true for branch devices, false for sink devices
+ *
+ * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
+ * identification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+                    bool is_branch)
+{
+       struct drm_dp_dpcd_ident *ident = &desc->ident;
+       unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
+       int ret, dev_id_len;
+
+       ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+       if (ret < 0)
+               return ret;
+
+       desc->quirks = drm_dp_get_quirks(ident, is_branch);
+
+       dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
+
+       DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+                     is_branch ? "branch" : "sink",
+                     (int)sizeof(ident->oui), ident->oui,
+                     dev_id_len, ident->device_id,
+                     ident->hw_rev >> 4, ident->hw_rev & 0xf,
+                     ident->sw_major_rev, ident->sw_minor_rev,
+                     desc->quirks);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_read_desc);
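Drivers consume the descriptor through drm_dp_has_quirk(), exactly as the i915 hunks later in this patch do. Minimal usage sketch (assumes an aux channel and cached DPCD receiver caps are at hand):

	struct drm_dp_desc desc;
	bool limited_m_n = false;

	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) == 0)
		limited_m_n = drm_dp_has_quirk(&desc,
					       DP_DPCD_QUIRK_LIMITED_M_N);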
index b5c6bb46a4251bdac218832ba61e52109e5a1768..37b8ad3e30d80440aea9ea2654a7a99696b50a57 100644 (file)
@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
 void drm_unplug_dev(struct drm_device *dev)
 {
        /* for a USB device */
-       drm_dev_unregister(dev);
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_modeset_unregister_all(dev);
+
+       drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+       drm_minor_unregister(dev, DRM_MINOR_RENDER);
+       drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 
        mutex_lock(&drm_global_mutex);
 
index fedd4d60d9cd5b2bab64ed8aae1391ea2f402fc5..5dc8c4350602a561fe4cfd77fce77e26770696fb 100644 (file)
@@ -948,8 +948,6 @@ retry:
        }
 
 out:
-       if (ret && crtc->funcs->page_flip_target)
-               drm_crtc_vblank_put(crtc);
        if (fb)
                drm_framebuffer_put(fb);
        if (crtc->primary->old_fb)
@@ -964,5 +962,8 @@ out:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
 
+       if (ret && crtc->funcs->page_flip_target)
+               drm_crtc_vblank_put(crtc);
+
        return ret;
 }
index 09d3c4c3c858e8a05dcf4cbc9a14feb5910ff132..50294a7bd29da10f99e9dca59701b66c385bb66b 100644 (file)
@@ -82,14 +82,9 @@ err_file_priv_free:
        return ret;
 }
 
-static void exynos_drm_preclose(struct drm_device *dev,
-                                       struct drm_file *file)
-{
-       exynos_drm_subdrv_close(dev, file);
-}
-
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
+       exynos_drm_subdrv_close(dev, file);
        kfree(file->driver_priv);
        file->driver_priv = NULL;
 }
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
                                  | DRIVER_ATOMIC | DRIVER_RENDER,
        .open                   = exynos_drm_open,
-       .preclose               = exynos_drm_preclose,
        .lastclose              = exynos_drm_lastclose,
        .postclose              = exynos_drm_postclose,
        .gem_free_object_unlocked = exynos_drm_gem_free_object,
index cb317693059696b3c86bda9c93c2e5ebcefad921..39c740572034a6d4f3d69f2ab861b1d0a8f80803 100644 (file)
@@ -160,12 +160,9 @@ struct exynos_drm_clk {
 *     the drm framework doesn't support multiple irqs yet.
 *     we can tell which crtc the current hardware interrupt
 *     occurred on through this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
  */
 struct exynos_drm_crtc {
        struct drm_crtc                 base;
index fc4fda738906251de7a6047c3fc8f3699c469b79..d404de86d5f9de1d5fe07f856fe4a10fafdefd91 100644 (file)
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 {
        struct device *dev = dsi->dev;
        struct device_node *node = dev->of_node;
-       struct device_node *ep;
        int ret;
 
        ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
        if (ret < 0)
                return ret;
 
-       ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
-       if (!ep) {
-               dev_err(dev, "no output port with endpoint specified\n");
-               return -EINVAL;
-       }
-
-       ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+       ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
                                     &dsi->burst_clk_rate);
        if (ret < 0)
-               goto end;
+               return ret;
 
-       ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+       ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
                                     &dsi->esc_clk_rate);
        if (ret < 0)
-               goto end;
-
-       of_node_put(ep);
+               return ret;
 
        dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
        if (!dsi->bridge_node)
                return -EINVAL;
 
-end:
-       of_node_put(ep);
-
-       return ret;
+       return 0;
 }
 
 static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
 static int exynos_dsi_remove(struct platform_device *pdev)
 {
+       struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+       of_node_put(dsi->bridge_node);
+
        pm_runtime_disable(&pdev->dev);
 
        component_del(&pdev->dev, &exynos_dsi_component_ops);
index 0066fe7e622ef75de181cc54770daeb2006a2d24..be3eefec5152aa0cdf9dd891bf4d4fe423ff87d4 100644 (file)
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
                if (scan->type & DRM_MODE_TYPE_PREFERRED) {
                        mode_dev->panel_fixed_mode =
                            drm_mode_duplicate(dev, scan);
+                       DRM_DEBUG_KMS("Using mode from DDC\n");
                        goto out;       /* FIXME: check for quirks */
                }
        }
 
        /* Failed to get EDID, what about VBT? do we need this? */
-       if (mode_dev->vbt_mode)
+       if (dev_priv->lfp_lvds_vbt_mode) {
                mode_dev->panel_fixed_mode =
-                   drm_mode_duplicate(dev, mode_dev->vbt_mode);
+                       drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
 
-       if (!mode_dev->panel_fixed_mode)
-               if (dev_priv->lfp_lvds_vbt_mode)
-                       mode_dev->panel_fixed_mode =
-                               drm_mode_duplicate(dev,
-                                       dev_priv->lfp_lvds_vbt_mode);
+               if (mode_dev->panel_fixed_mode) {
+                       mode_dev->panel_fixed_mode->type |=
+                               DRM_MODE_TYPE_PREFERRED;
+                       DRM_DEBUG_KMS("Using mode from VBT\n");
+                       goto out;
+               }
+       }
 
        /*
         * If we didn't get EDID, try checking if the panel is already turned
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
                if (mode_dev->panel_fixed_mode) {
                        mode_dev->panel_fixed_mode->type |=
                            DRM_MODE_TYPE_PREFERRED;
+                       DRM_DEBUG_KMS("Using pre-programmed mode\n");
                        goto out;       /* FIXME: check for quirks */
                }
        }
index 5abc69c9630fc28789a32c87b19e44397d1d58ba..f77dcfaade6c5dfb74d7d600d8181d4fbb006f76 100644 (file)
@@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
         * Get the endpoint node. In our case, dsi has one output port1
         * to which the external HDMI bridge is connected.
         */
-       ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge);
+       ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
        if (ret)
                return ret;
 
index dca989eb2d42ed48f6c13c15fe9d3f8a9cbfaab2..24fe04d6307b0383da918308827a12027ded9fbe 100644 (file)
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine;
+       struct intel_vgpu_workload *pos, *n;
+       unsigned int tmp;
+
+       /* free the unsubmitted workloads in the queues. */
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+               list_for_each_entry_safe(pos, n,
+                       &vgpu->workload_q_head[engine->id], list) {
+                       list_del_init(&pos->list);
+                       free_workload(pos);
+               }
+       }
+}
+
 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 {
+       clean_workloads(vgpu, ALL_ENGINES);
        kmem_cache_destroy(vgpu->workloads);
 }
 
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
-       struct intel_vgpu_workload *pos, *n;
        unsigned int tmp;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-               /* free the unsubmited workload in the queue */
-               list_for_each_entry_safe(pos, n,
-                       &vgpu->workload_q_head[engine->id], list) {
-                       list_del_init(&pos->list);
-                       free_workload(pos);
-               }
-
+       clean_workloads(vgpu, engine_mask);
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                init_vgpu_execlist(vgpu, engine->id);
-       }
 }
index c995e540ff96e1f8a18a9232de2b26794fa03aa2..0ffd696545927277200d8b2332a024168672c5c5 100644 (file)
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       i915_reg_t reg = {.reg = offset};
+       u32 v = *(u32 *)p_data;
+
+       if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+               return intel_vgpu_default_mmio_write(vgpu,
+                               offset, p_data, bytes);
 
        switch (offset) {
        case 0x4ddc:
-               vgpu_vreg(vgpu, offset) = 0x8000003c;
-               /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
-               I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
                break;
        case 0x42080:
-               vgpu_vreg(vgpu, offset) = 0x8000;
-               /* WaCompressedResourceDisplayNewHashMode:skl */
-               I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+               /* bypass WaCompressedResourceDisplayNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
+               break;
+       case 0xe194:
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
+               break;
+       case 0x7014:
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
                break;
        default:
                return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
-       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+                skl_misc_ctl_write);
        MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x6e570, D_BDW_PLUS);
        MMIO_D(0x65f10, D_BDW_PLUS);
 
-       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+                skl_misc_ctl_write);
        MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
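For reference, the two trailing arguments of MMIO_DFH(reg, dev_mask, flags, read, write) are the read and write handlers; the hunks above replace the default write path (NULL) with skl_misc_ctl_write, so guest writes to these workaround registers are masked by the handler instead of being applied to real hardware verbatim:

	/* guest writes to 0xe194 now take the bypass path */
	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		 NULL, skl_misc_ctl_write);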
index 3036d4835b0fa7a3b366a31d0b6ed18fc7889ae1..48428672fc6ece0927416d17a8dde8c41f00f500 100644 (file)
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_fini;
 
        pci_set_drvdata(pdev, &dev_priv->drm);
+       /*
+        * Disable the system suspend direct complete optimization, which can
+        * leave the device suspended skipping the driver's suspend handlers
+        * if the device was already runtime suspended. This is needed due to
+        * the difference in our runtime and system suspend sequence and
+        * because the HDA driver may require us to enable the audio power
+        * domain during system suspend.
+        */
+       pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 
        ret = i915_driver_init_early(dev_priv, ent);
        if (ret < 0)
@@ -1272,10 +1281,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        dev_priv->ipc_enabled = false;
 
-       /* Everything is in place, we can now relax! */
-       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
-                driver.name, driver.major, driver.minor, driver.patchlevel,
-                driver.date, pci_name(pdev), dev_priv->drm.primary->index);
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
                DRM_INFO("DRM_I915_DEBUG enabled\n");
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
index c9b0949f6c1a2aba281c9a4bbf8d8b2c9ede3785..2c453a4e97d5ba28ca3a21da372f73eed0131268 100644 (file)
@@ -562,7 +562,8 @@ struct intel_link_m_n {
 
 void intel_link_compute_m_n(int bpp, int nlanes,
                            int pixel_clock, int link_clock,
-                           struct intel_link_m_n *m_n);
+                           struct intel_link_m_n *m_n,
+                           bool reduce_m_n);
 
 /* Interface history:
  *
@@ -2990,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
        return false;
 }
 
+static inline bool
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+       if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
+               return true;
+#endif
+       return false;
+}
+
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                                int enable_ppgtt);
 
index b6ac3df18b582534b118ab44aae1dbfe9f75186e..462031cbd77f714b23a3b7645039c0d8dba71f40 100644 (file)
@@ -3298,6 +3298,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
        int ret;
 
+       /* If the device is asleep, we have no requests outstanding */
+       if (!READ_ONCE(i915->gt.awake))
+               return 0;
+
        if (flags & I915_WAIT_LOCKED) {
                struct i915_gem_timeline *tl;
 
index a0563e18d753fd84731f8372efc7a938d2898a6b..f1989b8792dd6f21ba1a944113b424fb8dc3184d 100644 (file)
@@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                gen8_set_pte(&gtt_base[i], scratch_pte);
 }
 
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+       struct drm_i915_private *dev_priv = vm->i915;
+
+       /*
+        * Make sure the internal GAM fifo has been cleared of all GTT
+        * writes before exiting stop_machine(). This guarantees that
+        * any aperture accesses waiting to start in another process
+        * cannot back up behind the GTT writes causing a hang.
+        * The register can be any arbitrary GAM register.
+        */
+       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+       struct i915_address_space *vm;
+       dma_addr_t addr;
+       u64 offset;
+       enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+       struct insert_page *arg = _arg;
+
+       gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+                                         dma_addr_t addr,
+                                         u64 offset,
+                                         enum i915_cache_level level,
+                                         u32 unused)
+{
+       struct insert_page arg = { vm, addr, offset, level };
+
+       stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+       struct i915_address_space *vm;
+       struct sg_table *st;
+       u64 start;
+       enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+       struct insert_entries *arg = _arg;
+
+       gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+                                            struct sg_table *st,
+                                            u64 start,
+                                            enum i915_cache_level level,
+                                            u32 unused)
+{
+       struct insert_entries arg = { vm, st, start, level };
+
+       stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+       struct i915_address_space *vm;
+       u64 start;
+       u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+       struct clear_range *arg = _arg;
+
+       gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+                                         u64 start,
+                                         u64 length)
+{
+       struct clear_range arg = { vm, start, length };
+
+       stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  u64 start, u64 length)
 {
@@ -2313,7 +2408,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
                    appgtt->base.allocate_va_range) {
                        ret = appgtt->base.allocate_va_range(&appgtt->base,
                                                             vma->node.start,
-                                                            vma->node.size);
+                                                            vma->size);
                        if (ret)
                                goto err_pages;
                }
@@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
        ggtt->base.insert_entries = gen8_ggtt_insert_entries;
 
+       /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+       if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+               ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+               ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+               if (ggtt->base.clear_range != nop_clear_range)
+                       ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+       }
+
        ggtt->invalidate = gen6_ggtt_invalidate;
 
        return ggtt_probe_common(ggtt, size);
@@ -2997,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
 
 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 {
-       i915->ggtt.invalidate = gen6_ggtt_invalidate;
+       if (i915->ggtt.invalidate == guc_ggtt_invalidate)
+               i915->ggtt.invalidate = gen6_ggtt_invalidate;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
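All three __BKL wrappers above follow the same stop_machine() recipe: pack the operation's inputs into a struct, run the real update in a callback while every other CPU is parked, then drain the GAM write fifo with a posting read before any aperture access can resume. The recipe in general form (sketch; do_real_update and the argument struct are placeholders):

	struct my_args {
		struct i915_address_space *vm;
		/* ...inputs for the real update... */
	};

	static int my_update_cb(void *data)
	{
		struct my_args *args = data;

		do_real_update(args->vm);	/* no CPU can race this */
		bxt_vtd_ggtt_wa(args->vm);	/* flush the GAM fifo */
		return 0;
	}

	/* last argument NULL: the callback may run on any one CPU */
	stop_machine(my_update_cb, &args, NULL);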
index 129ed303a6c46e2f856eb1abc84990079abefb65..57d9f7f4ef159cd6eb30f9bc0bd10683eec5123f 100644 (file)
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
                return;
 
        mutex_unlock(&dev->struct_mutex);
-
-       /* expedite the RCU grace period to free some request slabs */
-       synchronize_rcu_expedited();
 }
 
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
                                I915_SHRINK_ACTIVE);
        intel_runtime_pm_put(dev_priv);
 
-       synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
-
        return freed;
 }
 
index a0d6d4317a490bba6487891a4048ddef6b358fe4..fb5231f98c0d620f1ccf03a9872607b1373dc0e2 100644 (file)
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                        obj->mm.quirked = false;
                }
                if (!i915_gem_object_is_tiled(obj)) {
-                       GEM_BUG_ON(!obj->mm.quirked);
+                       GEM_BUG_ON(obj->mm.quirked);
                        __i915_gem_object_pin_pages(obj);
                        obj->mm.quirked = true;
                }
index fd97fe00cd0d2ad00e1c7258eeb51ecf0f60d4c1..190f6aa5d15eb82bf51cbaed00b16ea8c5d4f5bc 100644 (file)
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
        u32 pipestat_mask;
        u32 enable_mask;
        enum pipe pipe;
-       u32 val;
 
        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+               I915_LPE_PIPE_A_INTERRUPT |
+               I915_LPE_PIPE_B_INTERRUPT;
+
        if (IS_CHERRYVIEW(dev_priv))
-               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+                       I915_LPE_PIPE_C_INTERRUPT;
 
        WARN_ON(dev_priv->irq_mask != ~0);
 
-       val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT |
-               I915_LPE_PIPE_C_INTERRUPT);
-
-       enable_mask |= val;
-
        dev_priv->irq_mask = ~enable_mask;
 
        GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
index f87b0c4e564d8b85de91e93f7a8d9a6e6f219b61..1a78363c7f4a9e974edbc4e4f31ec7d64d26b6ad 100644 (file)
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
        GEN5_FEATURES,
        .platform = INTEL_IRONLAKE,
-       .is_mobile = 1,
+       .is_mobile = 1, .has_fbc = 1,
 };
 
 #define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
        .has_hw_contexts = 1, \
        .has_logical_ring_contexts = 1, \
        .has_guc = 1, \
-       .has_decoupled_mmio = 1, \
        .has_aliasing_ppgtt = 1, \
        .has_full_ppgtt = 1, \
        .has_full_48bit_ppgtt = 1, \
index 5a7c63e64381e48a193610305973c468502565d2..65b837e96fe629d58f539b253dc1ab14595a459b 100644 (file)
@@ -8280,7 +8280,7 @@ enum {
 
 /* MIPI DSI registers */
 
-#define _MIPI_PORT(port, a, c) ((port) ? c : a)        /* ports A and C only */
+#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c)    /* ports A and C only */
 #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
 
 #define MIPIO_TXESC_CLK_DIV1                   _MMIO(0x160004)
index 3617927af269afb9872b0d5d419873f0945f880c..569717a1272367a91cf682a9cae7640f9ae32777 100644 (file)
@@ -6101,7 +6101,7 @@ retry:
        pipe_config->fdi_lanes = lane;
 
        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
-                              link_bw, &pipe_config->fdi_m_n);
+                              link_bw, &pipe_config->fdi_m_n, false);
 
        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
@@ -6277,7 +6277,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 }
 
 static void compute_m_n(unsigned int m, unsigned int n,
-                       uint32_t *ret_m, uint32_t *ret_n)
+                       uint32_t *ret_m, uint32_t *ret_n,
+                       bool reduce_m_n)
 {
        /*
         * Reduce M/N as much as possible without loss in precision. Several DP
@@ -6285,9 +6286,11 @@ static void compute_m_n(unsigned int m, unsigned int n,
         * values. The passed in values are more likely to have the least
         * significant bits zero than M after rounding below, so do this first.
         */
-       while ((m & 1) == 0 && (n & 1) == 0) {
-               m >>= 1;
-               n >>= 1;
+       if (reduce_m_n) {
+               while ((m & 1) == 0 && (n & 1) == 0) {
+                       m >>= 1;
+                       n >>= 1;
+               }
        }
 
        *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -6298,16 +6301,19 @@ static void compute_m_n(unsigned int m, unsigned int n,
 void
 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
                       int pixel_clock, int link_clock,
-                      struct intel_link_m_n *m_n)
+                      struct intel_link_m_n *m_n,
+                      bool reduce_m_n)
 {
        m_n->tu = 64;
 
        compute_m_n(bits_per_pixel * pixel_clock,
                    link_clock * nlanes * 8,
-                   &m_n->gmch_m, &m_n->gmch_n);
+                   &m_n->gmch_m, &m_n->gmch_n,
+                   reduce_m_n);
 
        compute_m_n(pixel_clock, link_clock,
-                   &m_n->link_m, &m_n->link_n);
+                   &m_n->link_m, &m_n->link_n,
+                   reduce_m_n);
 }
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -12197,6 +12203,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
+        *
+        * On VLV/CHV DSI the scanline counter would appear to increment
+        * approx. 1/3 of a scanline before start of vblank. Unfortunately
+        * that means we can't tell whether we're in vblank or not while
+        * we're on that particular line. We must still set scanline_offset
+        * to 1 so that the vblank timestamps come out correct when we query
+        * the scanline counter from within the vblank interrupt handler.
+        * However if queried just before the start of vblank we'll get an
+        * answer that's slightly in the future.
         */
        if (IS_GEN2(dev_priv)) {
                const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
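Worked numbers for the reduce_m_n gate (mode assumed for illustration): 24 bpp at a 148500 kHz pixel clock over four lanes at HBR2 (link_clock 540000) gives the data M/N pair (24 * 148500, 540000 * 4 * 8) = (3564000, 17280000). Both values share a factor of 2^5, so the loop reduces the pair to (111375, 540000). The Analogix 7737 branch tagged with DP_DPCD_QUIRK_LIMITED_M_N earlier in this patch needs those reduced values at HBR2 rates; every other sink now keeps the unreduced pair.

	unsigned int m = 24 * 148500;		/* 3564000 == (1 << 5) * 111375 */
	unsigned int n = 540000 * 4 * 8;	/* 17280000 == (1 << 5) * 540000 */

	while ((m & 1) == 0 && (n & 1) == 0) {	/* the reduce_m_n loop */
		m >>= 1;
		n >>= 1;
	}
	/* m == 111375 (odd, so the loop stops), n == 540000 */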
index ee77b519835c5fd9d8c582a9c3169b43d06ebab6..fc691b8b317cf3924a98adfb51ca1183a6f2a6b3 100644 (file)
@@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
        DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
-bool
-__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
-{
-       u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
-                                                     DP_SINK_OUI;
-
-       return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
-              sizeof(*desc);
-}
-
-bool intel_dp_read_desc(struct intel_dp *intel_dp)
-{
-       struct intel_dp_desc *desc = &intel_dp->desc;
-       bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
-                      DP_OUI_SUPPORT;
-       int dev_id_len;
-
-       if (!__intel_dp_read_desc(intel_dp, desc))
-               return false;
-
-       dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
-       DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
-                     drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
-                     (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
-                     dev_id_len, desc->device_id,
-                     desc->hw_rev >> 4, desc->hw_rev & 0xf,
-                     desc->sw_major_rev, desc->sw_minor_rev);
-
-       return true;
-}
-
 static int rate_to_index(int find, const int *rates)
 {
        int i = 0;
@@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        int common_rates[DP_MAX_SUPPORTED_RATES] = {};
        int common_len;
        uint8_t link_bw, rate_select;
+       bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+                                          DP_DPCD_QUIRK_LIMITED_M_N);
 
        common_len = intel_dp_common_rates(intel_dp, common_rates);
 
@@ -1753,7 +1724,8 @@ found:
        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
-                              &pipe_config->dp_m_n);
+                              &pipe_config->dp_m_n,
+                              reduce_m_n);
 
        if (intel_connector->panel.downclock_mode != NULL &&
                dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -1761,7 +1733,8 @@ found:
                        intel_link_compute_m_n(bpp, lane_count,
                                intel_connector->panel.downclock_mode->clock,
                                pipe_config->port_clock,
-                               &pipe_config->dp_m2_n2);
+                               &pipe_config->dp_m2_n2,
+                               reduce_m_n);
        }
 
        /*
@@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
        if (!intel_dp_read_dpcd(intel_dp))
                return false;
 
-       intel_dp_read_desc(intel_dp);
+       drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+                        drm_dp_is_branch(intel_dp->dpcd));
 
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 
        intel_dp_print_rates(intel_dp);
 
-       intel_dp_read_desc(intel_dp);
+       drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+                        drm_dp_is_branch(intel_dp->dpcd));
 
        intel_dp_configure_mst(intel_dp);
 
index c1f62eb07c07a7ce49b3ed39e1e6ee2b23eb65e8..989e25577ac0445f9e7632a575a6de38d7f9ec49 100644 (file)
@@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        int lane_count, slots;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int mst_pbn;
+       bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+                                          DP_DPCD_QUIRK_LIMITED_M_N);
 
        pipe_config->has_pch_encoder = false;
        bpp = 24;
@@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
-                              &pipe_config->dp_m_n);
+                              &pipe_config->dp_m_n,
+                              reduce_m_n);
 
        pipe_config->dp_m_n.tu = slots;
 
index aaee3949a42267603a5dfa9deb9be87dd0f7b2b4..f630c7af50205540b64481d9c3ee559fbdfc8f7f 100644 (file)
@@ -906,14 +906,6 @@ enum link_m_n_set {
        M2_N2
 };
 
-struct intel_dp_desc {
-       u8 oui[3];
-       u8 device_id[6];
-       u8 hw_rev;
-       u8 sw_major_rev;
-       u8 sw_minor_rev;
-} __packed;
-
 struct intel_dp_compliance_data {
        unsigned long edid;
        uint8_t video_pattern;
@@ -957,7 +949,7 @@ struct intel_dp {
        /* Max link BW for the sink as per DPCD registers */
        int max_sink_link_bw;
        /* sink or branch descriptor */
-       struct intel_dp_desc desc;
+       struct drm_dp_desc desc;
        struct drm_dp_aux aux;
        enum intel_display_power_domain aux_power_domain;
        uint8_t train_set[4];
@@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 }
 
 bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
-bool __intel_dp_read_desc(struct intel_dp *intel_dp,
-                         struct intel_dp_desc *desc);
-bool intel_dp_read_desc(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
index 854e8e0c836bd2099c1cfcb72e12e3ec5ff21915..f94eacff196c5d0980690ae95cda45c42e3a4e9b 100644 (file)
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        return 0;
 }
 
+static bool ring_is_idle(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       bool idle = true;
+
+       intel_runtime_pm_get(dev_priv);
+
+       /* No bit for gen2, so assume the CS parser is idle */
+       if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+               idle = false;
+
+       intel_runtime_pm_put(dev_priv);
+
+       return idle;
+}
+
 /**
  * intel_engine_is_idle() - Report if the engine has finished processing all work
  * @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-
        /* Any inflight/incomplete requests? */
        if (!i915_seqno_passed(intel_engine_get_seqno(engine),
                               intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return false;
 
        /* Ring stopped? */
-       if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+       if (!ring_is_idle(engine))
                return false;
 
        return true;
index ded2add18b26122d7f6395d0d5532da26dd21f34..d93c58410bffe9701d148e546db753dff84c4083 100644 (file)
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
                                            int *width, int *height)
 {
-       int w, h;
-
-       if (drm_rotation_90_or_270(cache->plane.rotation)) {
-               w = cache->plane.src_h;
-               h = cache->plane.src_w;
-       } else {
-               w = cache->plane.src_w;
-               h = cache->plane.src_h;
-       }
-
        if (width)
-               *width = w;
+               *width = cache->plane.src_w;
        if (height)
-               *height = h;
+               *height = cache->plane.src_h;
 }
 
 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
 
        cache->plane.rotation = plane_state->base.rotation;
+       /*
+        * Src coordinates are already rotated by 270 degrees for
+        * the 90/270 degree plane rotation cases (to match the
+        * GTT mapping), hence no need to account for rotation here.
+        */
        cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
        cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
        cache->plane.visible = plane_state->base.visible;
index 668f00480d97c0ff0418c19dfaaffec31fc65341..292fedf30b0010c33e1eefd8f643b1b87bd38edd 100644 (file)
@@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
 
 static void lpe_audio_irq_unmask(struct irq_data *d)
 {
-       struct drm_i915_private *dev_priv = d->chip_data;
-       unsigned long irqflags;
-       u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT);
-
-       if (IS_CHERRYVIEW(dev_priv))
-               val |= I915_LPE_PIPE_C_INTERRUPT;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-       dev_priv->irq_mask &= ~val;
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       POSTING_READ(VLV_IMR);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static void lpe_audio_irq_mask(struct irq_data *d)
 {
-       struct drm_i915_private *dev_priv = d->chip_data;
-       unsigned long irqflags;
-       u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT);
-
-       if (IS_CHERRYVIEW(dev_priv))
-               val |= I915_LPE_PIPE_C_INTERRUPT;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-       dev_priv->irq_mask |= val;
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IIR, val);
-       POSTING_READ(VLV_IIR);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static struct irq_chip lpe_audio_irqchip = {
@@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 
        desc = irq_to_desc(dev_priv->lpe_audio.irq);
 
-       lpe_audio_irq_mask(&desc->irq_data);
-
        lpe_audio_platdev_destroy(dev_priv);
 
        irq_free_desc(dev_priv->lpe_audio.irq);
index c8f7c631fc1f8e354cac0038c80aa35d0a1dd0d2..dac4e003c1f317ec402110132bad0c3a734bf52a 100644 (file)
@@ -1989,7 +1989,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
        ce->ring = ring;
        ce->state = vma;
-       ce->initialised = engine->init_context == NULL;
+       ce->initialised |= engine->init_context == NULL;
 
        return 0;
 
index 71cbe9c089320cbc305c827bacd41fcbf1e542ce..5abef482eacf1b24780edea4c40ab7e593a42dc6 100644 (file)
@@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
                return false;
        }
 
-       intel_dp_read_desc(dp);
+       drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
 
        DRM_DEBUG_KMS("Success: LSPCON init\n");
        return true;
index 570bd603f401d513ac3f08c67fc78d6d1523b762..2ca481b5aa691872d39263605ef67b9c7335cec6 100644 (file)
@@ -4335,10 +4335,18 @@ skl_compute_wm(struct drm_atomic_state *state)
        struct drm_crtc_state *cstate;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct skl_wm_values *results = &intel_state->wm_results;
+       struct drm_device *dev = state->dev;
        struct skl_pipe_wm *pipe_wm;
        bool changed = false;
        int ret, i;
 
+       /*
+        * When we distrust bios wm we always need to recompute to set the
+        * expected DDB allocations for each CRTC.
+        */
+       if (to_i915(dev)->wm.distrust_bios_wm)
+               changed = true;
+
        /*
         * If this transaction isn't actually touching any CRTC's, don't
         * bother with watermark calculation.  Note that if we pass this
@@ -4349,6 +4357,7 @@ skl_compute_wm(struct drm_atomic_state *state)
         */
        for_each_new_crtc_in_state(state, crtc, cstate, i)
                changed = true;
+
        if (!changed)
                return 0;
 
index c3780d0d2baf752ce9d590b6f6c8db67674ec745..559f1ab42bfc23e005020d9bb3cb88e0f0d57943 100644 (file)
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
        }
 
        /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
-       if (intel_crtc->config->pipe_src_w > 3200 ||
-                               intel_crtc->config->pipe_src_h > 2000) {
+       if (dev_priv->psr.psr2_support &&
+           (intel_crtc->config->pipe_src_w > 3200 ||
+            intel_crtc->config->pipe_src_h > 2000)) {
                dev_priv->psr.psr2_support = false;
                return false;
        }
index 8c87c717c7cda92c4256cf277828e594f96a0ad1..e6517edcd16b55608c125452b56904f2b48e90df 100644 (file)
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  */
 void intel_pipe_update_start(struct intel_crtc *crtc)
 {
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        long timeout = msecs_to_jiffies_timeout(1);
        int scanline, min, max, vblank_start;
        wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+       bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+               intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
        DEFINE_WAIT(wait);
 
        vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
 
        drm_crtc_vblank_put(&crtc->base);
 
+       /*
+        * On VLV/CHV DSI the scanline counter would appear to
+        * increment approx. 1/3 of a scanline before start of vblank.
+        * The registers still get latched at start of vblank however.
+        * This means we must not write any registers on the first
+        * line of vblank (since not the whole line is actually in
+        * vblank). And unfortunately we can't use the interrupt to
+        * wait here since it will fire too soon. We could use the
+        * frame start interrupt instead since it will fire after the
+        * critical scanline, but that would require more changes
+        * in the interrupt code. So for now we'll just do the nasty
+        * thing and poll for the bad scanline to pass us by.
+        *
+        * FIXME figure out if BXT+ DSI suffers from this as well
+        */
+       while (need_vlv_dsi_wa && scanline == vblank_start)
+               scanline = intel_get_crtc_scanline(crtc);
+
        crtc->debug.scanline_start = scanline;
        crtc->debug.start_vbl_time = ktime_get();
        crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
index 4b7f73aeddac6475db31d184853f833c8ba3d510..f84115261ae78b02591a64cb77de96c3fda167bb 100644 (file)
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
  *                available in the work queue (note, the queue is shared,
  *                not per-engine). It is OK for this to be nonzero, but
  *                it should not be huge!
- *   q_fail: failed to enqueue a work item. This should never happen,
- *           because we check for space beforehand.
  *   b_fail: failed to ring the doorbell. This should never happen, unless
  *           somehow the hardware misbehaves, or maybe if the GuC firmware
  *           crashes? We probably need to reset the GPU to recover.
index 1afb8b06e3e19bf23ed287277415afb364504b23..12b85b3278cd1cfc53b159253e9152e3d8f1784b 100644 (file)
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
 static int igt_ctx_exec(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_gem_object *obj = NULL;
        struct drm_file *file;
        IGT_TIMEOUT(end_time);
        LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
                }
 
                for_each_engine(engine, i915, id) {
-                       if (dw == 0) {
+                       if (!obj) {
                                obj = create_test_object(ctx, file, &objects);
                                if (IS_ERR(obj)) {
                                        err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
                                goto out_unlock;
                        }
 
-                       if (++dw == max_dwords(obj))
+                       if (++dw == max_dwords(obj)) {
+                               obj = NULL;
                                dw = 0;
+                       }
                        ndwords++;
                }
                ncontexts++;
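
The selftest fix above switches the allocate-a-new-object trigger from dw == 0 to obj == NULL, and resets obj once it is full, so the test object's lifetime is tracked explicitly instead of being inferred from the dword cursor. The same pattern in a standalone sketch, with plain malloc/free in place of GEM objects:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_DWORDS 4

    int main(void)
    {
            int *obj = NULL;
            int dw = 0, ndwords = 0;

            for (int i = 0; i < 10; i++) {
                    if (!obj)                     /* was: if (dw == 0) */
                            obj = calloc(MAX_DWORDS, sizeof(*obj));

                    obj[dw] = i;                  /* emit one dword */

                    if (++dw == MAX_DWORDS) {     /* object full: retire it */
                            free(obj);
                            obj = NULL;
                            dw = 0;
                    }
                    ndwords++;
            }
            free(obj);
            printf("emitted %d dwords\n", ndwords);
            return 0;
    }
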
index 8fb801fab039b10225765b044a4e535cf7a4201d..8b05ecb8fdefccafeed07755d501e8902ccba0c3 100644 (file)
@@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                ret = drm_of_find_panel_or_bridge(child,
                                                  imx_ldb->lvds_mux ? 4 : 2, 0,
                                                  &channel->panel, &channel->bridge);
-               if (ret)
+               if (ret && ret != -ENODEV)
                        return ret;
 
                /* panel ddc only if there is no bridge */
index 808b995a990f5529b303e23cb1085b4b7f478355..b5cc6e12334cf96e8faacc01a1a8fb5dcec48202 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_of.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
 
 static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
 {
-       u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
-
-       while (timeout_ms--) {
-               if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
-                       break;
-
-               usleep_range(2, 4);
-       }
+       int ret;
+       u32 val;
 
-       if (timeout_ms == 0) {
+       ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
+                                4, 2000000);
+       if (ret) {
                DRM_WARN("polling dsi wait not busy timeout!\n");
 
                mtk_dsi_enable(dsi);
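
The conversion above replaces a hand-rolled countdown (whose timeout test could never fire as written: the post-decrement leaves the u32 counter at UINT_MAX, not 0, when the loop expires) with readl_poll_timeout(), which re-reads the register until the condition holds or the timeout elapses. A userspace sketch of that shape, with reg_read() standing in for readl(dsi->regs + DSI_INTSTA) and POSIX nanosleep for the poll delay:

    #include <stdio.h>
    #include <time.h>

    #define DSI_BUSY 0x1

    static unsigned int fake_reg = DSI_BUSY;                /* simulated */
    static unsigned int reg_read(void) { return fake_reg--; } /* clears BUSY */

    static int poll_timeout(unsigned int *val, long timeout_us)
    {
            struct timespec ts = { 0, 4000 };    /* ~4us per poll step */
            long waited = 0;

            while (waited < timeout_us) {
                    *val = reg_read();
                    if (!(*val & DSI_BUSY))
                            return 0;            /* condition met */
                    nanosleep(&ts, NULL);
                    waited += 4;
            }
            return -1;                           /* -ETIMEDOUT analogue */
    }

    int main(void)
    {
            unsigned int val;

            printf("poll_timeout: %d\n", poll_timeout(&val, 2000000));
            return 0;
    }
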
index 41a1c03b03476b620a511731518b8d0f7772417d..0a4ffd7241468dcbd064fa3a210f17094d10697b 100644 (file)
@@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
        }
 
        err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
-       if (err) {
+       if (err < 0) {
                dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
                        err);
                return err;
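
hdmi_vendor_infoframe_pack() follows the pack-function convention this fix relies on: it returns the number of bytes written on success and a negative errno on failure, so the old "if (err)" treated every successful pack as an error. The convention in miniature (an illustrative stand-in, not the real API):

    #include <stdio.h>

    static long pack(char *buf, unsigned long len)
    {
            (void)buf;
            if (len < 8)
                    return -22;     /* -EINVAL */
            return 8;               /* bytes packed */
    }

    int main(void)
    {
            char buffer[32];
            long err = pack(buffer, sizeof(buffer));

            if (err < 0) {          /* was: if (err), which also caught 8 */
                    printf("failed to pack: %ld\n", err);
                    return 1;
            }
            printf("packed %ld bytes\n", err);
            return 0;
    }
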
index 75382f5f0fcec00a8749df932cfd7dba9eb19542..10b227d83e9ac7af98b8177188bb56d48823f2b1 100644 (file)
@@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = {
        .max_register   = 0x1000,
 };
 
-static int meson_drv_bind(struct device *dev)
+static int meson_drv_bind_master(struct device *dev, bool has_components)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct meson_drm *priv;
@@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev)
        if (ret)
                goto free_drm;
 
-       ret = component_bind_all(drm->dev, drm);
-       if (ret) {
-               dev_err(drm->dev, "Couldn't bind all components\n");
-               goto free_drm;
+       if (has_components) {
+               ret = component_bind_all(drm->dev, drm);
+               if (ret) {
+                       dev_err(drm->dev, "Couldn't bind all components\n");
+                       goto free_drm;
+               }
        }
 
        ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@ free_drm:
        return ret;
 }
 
+static int meson_drv_bind(struct device *dev)
+{
+       return meson_drv_bind_master(dev, true);
+}
+
 static void meson_drv_unbind(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev)
                count += meson_probe_remote(pdev, &match, np, remote);
        }
 
+       if (count && !match)
+               return meson_drv_bind_master(&pdev->dev, false);
+
        /* If some endpoints were found, initialize the nodes */
        if (count) {
                dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
index 5b8e23d051f2f3752a180df4abedccefcfebc3ed..0a31cd6d01ce145f3f112b8c19d17dcdd46ea524 100644 (file)
@@ -13,6 +13,7 @@ config DRM_MSM
        select QCOM_SCM
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
+       select PM_OPP
        default y
        help
          DRM/KMS driver for MSM/snapdragon.
index f8f48d014978c0ccd5cc9ffcc3d699b86f779399..9c34d7824988654ab2f8366741724da8ac18b82a 100644 (file)
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
        return 0;
 }
 
-static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
        .map = mdss_hw_irqdomain_map,
        .xlate = irq_domain_xlate_onecell,
 };
index a38c5fe6cc19752a9832618af1a4f146bab4a8df..7d3741215387110bb7f7ad622cb54d0d411fb9f1 100644 (file)
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
 
        mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
                        sizeof(*mdp5_state), GFP_KERNEL);
+       if (!mdp5_state)
+               return NULL;
 
-       if (mdp5_state && mdp5_state->base.fb)
-               drm_framebuffer_reference(mdp5_state->base.fb);
+       __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
 
        return &mdp5_state->base;
 }
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
                        mdp5_pipe_release(state->state, old_hwpipe);
                        mdp5_pipe_release(state->state, old_right_hwpipe);
                }
+       } else {
+               mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+               mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+               mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
        }
 
        return 0;
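
Two things change in the mdp5 plane hunks above: duplicate_state now bails out before touching the copy when kmemdup() fails (and delegates the fb referencing to __drm_atomic_helper_plane_duplicate_state()), and the atomic check releases both hwpipes when the plane ends up with no visible fb. The allocation-ordering half as a standalone sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct state { int refcount; int data; };

    /* kmemdup() analogue: verify the allocation before any
     * reference-taking, mirroring the fix above. */
    static struct state *dup_state(const struct state *old)
    {
            struct state *s = malloc(sizeof(*s));

            if (!s)
                    return NULL;    /* was: refs taken before this check */
            memcpy(s, old, sizeof(*s));
            s->refcount = 1;        /* take references only once s is valid */
            return s;
    }

    int main(void)
    {
            struct state base = { 1, 42 };
            struct state *copy = dup_state(&base);

            printf("copy: %p\n", (void *)copy);
            free(copy);
            return 0;
    }
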
index 87b5695d4034df0e118475617167ee990ccfc490..9d498eb81906220705d85c57260cd4b1f82fa1fc 100644 (file)
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export   = drm_gem_prime_export,
        .gem_prime_import   = drm_gem_prime_import,
+       .gem_prime_res_obj  = msm_gem_prime_res_obj,
        .gem_prime_pin      = msm_gem_prime_pin,
        .gem_prime_unpin    = msm_gem_prime_unpin,
        .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
index 28b6f9ba50664509bb44fa2b5704d3bcc86a67af..1b26ca626528ab5f4435f689d0a213f0e672aaa5 100644 (file)
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
                struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
index 3f299c537b77ae347bce4c92368ed774c7695459..a2f89bac9c160674f5f103f75a04a7a92e7c5b99 100644 (file)
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 }
 
 struct msm_fence {
-       struct msm_fence_context *fctx;
        struct dma_fence base;
+       struct msm_fence_context *fctx;
 };
 
 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
        return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct dma_fence *fence)
-{
-       struct msm_fence *f = to_msm_fence(fence);
-       kfree_rcu(f, base.rcu);
-}
-
 static const struct dma_fence_ops msm_fence_ops = {
        .get_driver_name = msm_fence_get_driver_name,
        .get_timeline_name = msm_fence_get_timeline_name,
        .enable_signaling = msm_fence_enable_signaling,
        .signaled = msm_fence_signaled,
        .wait = dma_fence_default_wait,
-       .release = msm_fence_release,
+       .release = dma_fence_free,
 };
 
 struct dma_fence *
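
Reordering msm_fence so the embedded dma_fence comes first is what lets the custom release hook be replaced by the generic dma_fence_free(): the generic path frees the dma_fence pointer itself, which only frees the whole allocation when the base member sits at offset zero. A standalone illustration of the layout difference, with simplified types:

    #include <stddef.h>
    #include <stdio.h>

    struct base { int seqno; };

    struct wrapper_bad  { void *ctx; struct base base; };  /* base interior */
    struct wrapper_good { struct base base; void *ctx; };  /* base at offset 0 */

    int main(void)
    {
            printf("bad:  base offset = %zu\n",
                   offsetof(struct wrapper_bad, base));
            printf("good: base offset = %zu\n",
                   offsetof(struct wrapper_good, base));
            /* free(&w->base) equals free(w) only in the offset-0 layout. */
            return 0;
    }
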
index 68e509b3b9e4d08730e3901f46a397519c33e77c..50289a23baf8df27c4bc1aebf067da2b011b8f28 100644 (file)
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
        struct msm_gem_object *msm_obj;
        bool use_vram = false;
 
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
        size = PAGE_ALIGN(dmabuf->size);
 
+       /* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+       mutex_lock(&dev->struct_mutex);
        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+       mutex_unlock(&dev->struct_mutex);
+
        if (ret)
                goto fail;
 
index 60bb290700cef9c32fc2ca0dd2db229a6a7ffedf..13403c6da6c75012fa5f17f4b0b63075ddf20874 100644 (file)
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
        if (!obj->import_attach)
                msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       return msm_obj->resv;
+}
index 1c545ebe6a5a0f875a995b1db3a1204b0b21d57e..7832e6421d250d0bd78400057e46dce07dc2d18c 100644 (file)
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                if (!in_fence)
                        return -EINVAL;
 
-               /* TODO if we get an array-fence due to userspace merging multiple
-                * fences, we need a way to determine if all the backing fences
-                * are from our own context..
+               /*
+                * Wait if the fence is from a foreign context, or if the fence
+                * array contains any fence from a foreign context.
                 */
-
-               if (in_fence->context != gpu->fctx->context) {
+               if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
                        ret = dma_fence_wait(in_fence, true);
                        if (ret)
                                return ret;
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                        goto out;
                }
 
-               if ((submit_cmd.size + submit_cmd.submit_offset) >=
-                               msm_obj->base.size) {
+               if (!submit_cmd.size ||
+                       ((submit_cmd.size + submit_cmd.submit_offset) >
+                               msm_obj->base.size)) {
                        DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
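
dma_fence_match_context() answers the TODO being deleted above: for a merged fence array it checks that every backing fence belongs to the given context, not just the wrapper, so fences merged by userspace are handled correctly. (The second hunk also starts rejecting zero-sized commands and tightens the bounds test from >= to >.) A standalone sketch of the all-fences check, with illustrative types:

    #include <stdbool.h>
    #include <stdio.h>

    struct fence { unsigned long context; };

    static bool match_context(struct fence **fences, int n, unsigned long ctx)
    {
            for (int i = 0; i < n; i++)
                    if (fences[i]->context != ctx)
                            return false;  /* foreign fence: caller must wait */
            return true;
    }

    int main(void)
    {
            struct fence a = { 1 }, b = { 2 };
            struct fence *arr[] = { &a, &b };

            printf("all from ctx 1? %d\n", match_context(arr, 2, 1));
            return 0;
    }
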
index 97b9c38c6b3ff7e05adf9ea84e8328d19e32b90f..0fdc88d79ca87b3a54709aa4d527db80ca0997dc 100644 (file)
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
                gpu->grp_clks[i] = get_clock(dev, name);
 
                /* Remember the key clocks that we need to control later */
-               if (!strcmp(name, "core"))
+               if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
                        gpu->core_clk = gpu->grp_clks[i];
-               else if (!strcmp(name, "rbbmtimer"))
+               else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
                        gpu->rbbmtimer_clk = gpu->grp_clks[i];
 
                ++i;
index 6a567fe347b369a2c01d5e89c67ac0a881a49420..820a4805916f1da8115b798cf3c93d5750ae8196 100644 (file)
@@ -4,6 +4,7 @@
 
 struct nvkm_alarm {
        struct list_head head;
+       struct list_head exec;
        u64 timestamp;
        void (*func)(struct nvkm_alarm *);
 };
index 36268e1802b5afcd65c6b3d623b273c4ac60af87..15a13d09d431c9a8d4822fb5f997bcf3225a2d4c 100644 (file)
@@ -80,7 +80,7 @@ int nouveau_modeset = -1;
 module_param_named(modeset, nouveau_modeset, int, 0400);
 
 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
-int nouveau_runtime_pm = -1;
+static int nouveau_runtime_pm = -1;
 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
 
 static struct drm_driver driver_stub;
@@ -495,7 +495,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        nouveau_fbcon_init(dev);
        nouveau_led_init(dev);
 
-       if (nouveau_runtime_pm != 0) {
+       if (nouveau_pmops_runtime()) {
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
                pm_runtime_set_active(dev->dev);
@@ -527,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
 
-       if (nouveau_runtime_pm != 0) {
+       if (nouveau_pmops_runtime()) {
                pm_runtime_get_sync(dev->dev);
                pm_runtime_forbid(dev->dev);
        }
@@ -726,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev)
        return nouveau_do_resume(drm_dev, false);
 }
 
+bool
+nouveau_pmops_runtime()
+{
+       if (nouveau_runtime_pm == -1)
+               return nouveau_is_optimus() || nouveau_is_v1_dsm();
+       return nouveau_runtime_pm == 1;
+}
+
 static int
 nouveau_pmops_runtime_suspend(struct device *dev)
 {
@@ -733,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        int ret;
 
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+       if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
@@ -765,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
        struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
        int ret;
 
-       if (nouveau_runtime_pm == 0)
-               return -EINVAL;
+       if (!nouveau_pmops_runtime()) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
@@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev)
        struct nouveau_drm *drm = nouveau_drm(drm_dev);
        struct drm_crtc *crtc;
 
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+       if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
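
The nouveau hunks above fold the repeated runpm-parameter/Optimus/DSM logic into one predicate, nouveau_pmops_runtime(), which the load, unload, suspend, resume, idle, and VGA-switcheroo paths now all call. The policy in a standalone sketch; the probe functions are hypothetical stand-ins for nouveau_is_optimus()/nouveau_is_v1_dsm():

    #include <stdbool.h>
    #include <stdio.h>

    static int runtime_pm = -1;  /* module parameter: -1 auto, 0 off, 1 on */

    static bool is_optimus(void) { return true; }   /* hypothetical probe */
    static bool is_v1_dsm(void)  { return false; }  /* hypothetical probe */

    static bool pmops_runtime(void)
    {
            if (runtime_pm == -1)
                    return is_optimus() || is_v1_dsm();
            return runtime_pm == 1;
    }

    int main(void)
    {
            printf("runtime PM active: %d\n", pmops_runtime());
            return 0;
    }
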
index eadec2f49ad318cf44d3464ff39dbe201e7074cb..a11b6aaed325f17ddf6f82c8fd8ced531191574a 100644 (file)
@@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv)
 #include <nvif/object.h>
 #include <nvif/device.h>
 
-extern int nouveau_runtime_pm;
-
 struct nouveau_drm {
        struct nouveau_cli client;
        struct drm_device *dev;
@@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev)
 
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
+bool nouveau_pmops_runtime(void);
 
 #include <nvkm/core/tegra.h>
 
index a4aacbc0cec8efe603d18152099aaecdeab93dce..02fe0efb9e1643f3a4802b947b3ab306bdd690bd 100644 (file)
@@ -87,7 +87,7 @@ void
 nouveau_vga_init(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
-       bool runtime = false;
+       bool runtime = nouveau_pmops_runtime();
 
        /* only relevant for PCI devices */
        if (!dev->pdev)
@@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm)
        if (pci_is_thunderbolt_attached(dev->pdev))
                return;
 
-       if (nouveau_runtime_pm == 1)
-               runtime = true;
-       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-               runtime = true;
        vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
 
        if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@ void
 nouveau_vga_fini(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
-       bool runtime = false;
+       bool runtime = nouveau_pmops_runtime();
 
        vga_client_register(dev->pdev, NULL, NULL, NULL);
 
        if (pci_is_thunderbolt_attached(dev->pdev))
                return;
 
-       if (nouveau_runtime_pm == 1)
-               runtime = true;
-       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-               runtime = true;
-
        vga_switcheroo_unregister_client(dev->pdev);
        if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
                vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
index a7663249b3baf2df1c5c75d87d3b32109984ba97..06e564a9ccb253b3018b45d2f0a96cc53430ab2c 100644 (file)
@@ -2107,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
                                        asyc->set.dither = true;
                        }
                } else {
-                       asyc->set.mask = ~0;
+                       if (asyc)
+                               asyc->set.mask = ~0;
                        asyh->set.mask = ~0;
                }
 
index f2a86eae0a0d624b31cb8ee9a65e6487705a6c1a..2437f7d41ca20de616a7193f2d6fdec6d7daea00 100644 (file)
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
                /* Move to completed list.  We'll drop the lock before
                 * executing the callback so it can reschedule itself.
                 */
-               list_move_tail(&alarm->head, &exec);
+               list_del_init(&alarm->head);
+               list_add(&alarm->exec, &exec);
        }
 
        /* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
        spin_unlock_irqrestore(&tmr->lock, flags);
 
        /* Execute completed callbacks. */
-       list_for_each_entry_safe(alarm, atemp, &exec, head) {
-               list_del_init(&alarm->head);
+       list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+               list_del(&alarm->exec);
                alarm->func(alarm);
        }
 }
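
Giving each nvkm_alarm a second list hook (exec) means an alarm can sit on the local completed list while its callback re-arms it onto the pending list through head, without the two uses fighting over a single hook. A standalone sketch with a minimal intrusive doubly-linked list:

    #include <stdio.h>

    struct node { struct node *next, *prev; };

    static void list_init(struct node *h) { h->next = h->prev = h; }
    static void list_add(struct node *h, struct node *n)
    {
            n->next = h->next; n->prev = h;
            h->next->prev = n; h->next = n;
    }
    static void list_del(struct node *n)
    {
            n->prev->next = n->next; n->next->prev = n->prev;
            n->next = n->prev = n;
    }

    struct alarm {
            struct node head;   /* hook for the timer's pending list */
            struct node exec;   /* hook for the local completed list */
    };

    int main(void)
    {
            struct node pending, exec;
            struct alarm a;

            list_init(&pending); list_init(&exec);
            list_init(&a.head);  list_init(&a.exec);

            list_add(&pending, &a.head);

            /* Trigger path: retire from pending via ->head, queue for the
             * callback via ->exec.  The callback can now re-arm the alarm
             * (re-adding ->head) without corrupting the exec list. */
            list_del(&a.head);
            list_add(&exec, &a.exec);
            list_add(&pending, &a.head);    /* simulated re-arm */

            printf("pending non-empty: %d, exec non-empty: %d\n",
                   pending.next != &pending, exec.next != &exec);
            return 0;
    }
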
index 058340a002c29daef072f21bf90e1e8a572b3fa9..4a340efd8ba67ac23aadc6611ea3ce7e7d460e1a 100644 (file)
@@ -575,8 +575,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        if (ret)
                return;
 
-       cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
-
        if (fb != old_state->fb) {
                obj = to_qxl_framebuffer(fb)->obj;
                user_bo = gem_to_qxl_bo(obj);
@@ -614,6 +612,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                qxl_bo_kunmap(cursor_bo);
                qxl_bo_kunmap(user_bo);
 
+               cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
                cmd->u.set.visible = 1;
                cmd->u.set.shape = qxl_bo_physical_address(qdev,
                                                           cursor_bo, 0);
@@ -624,6 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                if (ret)
                        goto out_free_release;
 
+               cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
                cmd->type = QXL_CURSOR_MOVE;
        }
 
index 7ba450832e6b7a59db9499a0919a8bb5f4a3fd1a..ea36dc4dd5d22ec7b30678ea811dcf5009f454f7 100644 (file)
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
        u32 vblank_time = r600_dpm_get_vblank_time(rdev);
        u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
 
+       /* disable mclk switching if the refresh is >120Hz, even if the
+        * blanking period would allow it
+        */
+       if (r600_dpm_get_vrefresh(rdev) > 120)
+               return true;
+
        if (vblank_time < switch_limit)
                return true;
        else
index ccebe0f8d2e1e3b4ec15d6170b788e2e8fc8b980..008c145b7f29f60a298419931f1922555de5e35a 100644 (file)
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_RX_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
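
The cik/evergreen/r600/si hunks below and above are all the same copy-paste fix: the HPD6 acknowledge read the HPD5 register, modified that value, and wrote the result to HPD6, silently clobbering whatever else HPD6 held. The bug pattern in miniature, over a toy register file:

    #include <stdio.h>

    #define ACK 0x10

    static unsigned int regs[8];
    static unsigned int rreg(int r) { return regs[r]; }
    static void wreg(int r, unsigned int v) { regs[r] = v; }

    int main(void)
    {
            enum { HPD5 = 5, HPD6 = 6 };

            regs[HPD6] = 0x3;                /* HPD6 holds live config bits */

            unsigned int tmp = rreg(HPD6);   /* was: rreg(HPD5) */
            wreg(HPD6, tmp | ACK);

            printf("HPD6 = 0x%x\n", regs[HPD6]);  /* 0x13, config preserved */
            return 0;
    }
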
index f130ec41ee4bbcad63516335dfaf17e24e782b60..0bf103536404e5dde2d480bf692a496a6865e817 100644 (file)
@@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
@@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_RX_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
index 0a085176e79b35b2887c19f7b33091804b58e645..e06e2d8feab397822361ba18a3e4b420cc2f0c4d 100644 (file)
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
                        WREG32(DC_HPD5_INT_CONTROL, tmp);
                }
                if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-                       tmp = RREG32(DC_HPD5_INT_CONTROL);
+                       tmp = RREG32(DC_HPD6_INT_CONTROL);
                        tmp |= DC_HPDx_INT_ACK;
                        WREG32(DC_HPD6_INT_CONTROL, tmp);
                }
index e3e7cb1d10a2941d1790d36a9f5250f70aeb78cd..4761f27f2ca2a073a8ffb30806a9cac905003f23 100644 (file)
@@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
        if ((radeon_runtime_pm != 0) &&
            radeon_has_atpx() &&
            ((flags & RADEON_IS_IGP) == 0) &&
-           !pci_is_thunderbolt_attached(rdev->pdev))
+           !pci_is_thunderbolt_attached(dev->pdev))
                flags |= RADEON_IS_PX;
 
        /* radeon_device_init should report only fatal error
index ceee87f029d9a3479d374c4fc305338377dee961..76d1888528e675c700b543fa6e10c77a466054d7 100644 (file)
@@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
@@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_RX_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
index d8fa7a9c9240bdf53f06d214f5952af7f85e5ee4..ce5f2d1f9994113b6322a708f47f1e23049ef3ba 100644 (file)
@@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
                                      struct drm_connector_state *conn_state)
 {
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
-       struct rockchip_dp_device *dp = to_dp(encoder);
-       int ret;
 
        /*
         * The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 
        s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
        s->output_type = DRM_MODE_CONNECTOR_eDP;
-       if (dp->data->chip_type == RK3399_EDP) {
-               /*
-                * For RK3399, VOP Lit must code the out mode to RGB888,
-                * VOP Big must code the out mode to RGB10.
-                */
-               ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
-                                                       encoder);
-               if (ret > 0)
-                       s->output_mode = ROCKCHIP_OUT_MODE_P888;
-       }
 
        return 0;
 }
index a2169dd3d26b915c851bd089f25373495c188174..14fa1f8351e8df22ab30560fbb6a1906841ba43d 100644 (file)
@@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 {
        struct cdn_dp_device *dp = encoder_to_dp(encoder);
        int ret, val;
-       struct rockchip_crtc_state *state;
 
        ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
        if (ret < 0) {
@@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 
        DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
                          (ret) ? "LIT" : "BIG");
-       state = to_rockchip_crtc_state(encoder->crtc->state);
-       if (ret) {
+       if (ret)
                val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
-               state->output_mode = ROCKCHIP_OUT_MODE_P888;
-       } else {
+       else
                val = DP_SEL_VOP_LIT << 16;
-               state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
-       }
 
        ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
        if (ret)
index 3f7a82d1e0956e6a37e1478412210955db38aa19..45589d6ce65ed0fd0a7e1be60f83dd03bd3d47b5 100644 (file)
@@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 static void vop_crtc_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
+       const struct vop_data *vop_data = vop->data;
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
        struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
        u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
                DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
                              s->output_type);
        }
+
+       /*
+        * If the VOP does not support RGB10 output, force the output down to RGB888.
+        */
+       if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
+           !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
+               s->output_mode = ROCKCHIP_OUT_MODE_P888;
        VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
        VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
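
The rockchip hunks replace the per-chip special case in the eDP encoders with a capability bit on vop_data: VOPs that cannot drive RGB10 get their output mode clamped to RGB888 in one place, at CRTC enable. A standalone sketch of the gating; the names mirror the patch but the program is self-contained:

    #include <stdio.h>

    #define VOP_FEATURE_OUTPUT_RGB10 (1ULL << 0)

    enum out_mode { OUT_MODE_P888, OUT_MODE_AAAA /* RGB10 */ };

    struct vop_data { unsigned long long feature; };

    static enum out_mode clamp_mode(const struct vop_data *d, enum out_mode m)
    {
            /* Fall back to RGB888 when the VOP cannot drive RGB10. */
            if (m == OUT_MODE_AAAA &&
                !(d->feature & VOP_FEATURE_OUTPUT_RGB10))
                    return OUT_MODE_P888;
            return m;
    }

    int main(void)
    {
            struct vop_data lit = { 0 };                        /* e.g. VOP Lit */
            struct vop_data big = { VOP_FEATURE_OUTPUT_RGB10 }; /* e.g. VOP Big */

            printf("lit: %d, big: %d\n",
                   clamp_mode(&lit, OUT_MODE_AAAA),
                   clamp_mode(&big, OUT_MODE_AAAA));
            return 0;
    }
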
index 5a4faa85dbd29d91af08ae51a8c2ac69012cb33c..9979fd0c22821d7efa3d7054468e0914619e0692 100644 (file)
@@ -142,6 +142,9 @@ struct vop_data {
        const struct vop_intr *intr;
        const struct vop_win_data *win;
        unsigned int win_size;
+
+#define VOP_FEATURE_OUTPUT_RGB10       BIT(0)
+       u64 feature;
 };
 
 /* interrupt define */
index 0da44442aab097b8f4b40d67c8995be625bccfcd..bafd698a28b1b491c01823d2be293a41e67c3722 100644 (file)
@@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = {
 static const struct vop_data rk3288_vop = {
        .init_table = rk3288_init_reg_table,
        .table_size = ARRAY_SIZE(rk3288_init_reg_table),
+       .feature = VOP_FEATURE_OUTPUT_RGB10,
        .intr = &rk3288_vop_intr,
        .ctrl = &rk3288_ctrl_data,
        .win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = {
 static const struct vop_data rk3399_vop_big = {
        .init_table = rk3399_init_reg_table,
        .table_size = ARRAY_SIZE(rk3399_init_reg_table),
+       .feature = VOP_FEATURE_OUTPUT_RGB10,
        .intr = &rk3399_vop_intr,
        .ctrl = &rk3399_ctrl_data,
        /*
index 130d51c5ec6a2dab1211337d71313e6b1de15323..4b948fba9eec274b794a0546fec0179b3de1cadf 100644 (file)
@@ -41,9 +41,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20170221"
+#define VMWGFX_DRIVER_DATE "20170607"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 12
+#define VMWGFX_DRIVER_MINOR 13
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
index b6a0806b06bffaf6da9178905f9b2f6bb037d384..a1c68e6a689e32fd0dd4d74c805ee4afd0836a99 100644 (file)
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
+                               if (!fifo_state->dynamic_buffer)
+                                       goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
index ef9f3a2a40303290287b5259b7a71d2a8791ddb4..1d2db5d912b03c572b50f9b64b2f5d2a39de1365 100644 (file)
@@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 }
 
 
-
-/**
- * vmw_du_cursor_plane_update() - Update cursor image and location
- *
- * @plane: plane object to update
- * @crtc: owning CRTC of @plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of plane on crtc
- * @crtc_y: y offset of plane on crtc
- * @crtc_w: width of plane rectangle on crtc
- * @crtc_h: height of plane rectangle on crtc
- * @src_x: Not used
- * @src_y: Not used
- * @src_w: Not used
- * @src_h: Not used
- *
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int crtc_x, int crtc_y,
-                              unsigned int crtc_w,
-                              unsigned int crtc_h,
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h)
-{
-       struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-       struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-       struct vmw_surface *surface = NULL;
-       struct vmw_dma_buffer *dmabuf = NULL;
-       s32 hotspot_x, hotspot_y;
-       int ret;
-
-       hotspot_x = du->hotspot_x + fb->hot_x;
-       hotspot_y = du->hotspot_y + fb->hot_y;
-
-       /* A lot of the code assumes this */
-       if (crtc_w != 64 || crtc_h != 64) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (vmw_framebuffer_to_vfb(fb)->dmabuf)
-               dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
-       else
-               surface = vmw_framebuffer_to_vfbs(fb)->surface;
-
-       if (surface && !surface->snooper.image) {
-               DRM_ERROR("surface not suitable for cursor\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* setup new image */
-       ret = 0;
-       if (surface) {
-               /* vmw_user_surface_lookup takes one reference */
-               du->cursor_surface = surface;
-
-               du->cursor_age = du->cursor_surface->snooper.age;
-
-               ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
-                                             64, 64, hotspot_x, hotspot_y);
-       } else if (dmabuf) {
-               /* vmw_user_surface_lookup takes one reference */
-               du->cursor_dmabuf = dmabuf;
-
-               ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
-                                              hotspot_x, hotspot_y);
-       } else {
-               vmw_cursor_update_position(dev_priv, false, 0, 0);
-               goto out;
-       }
-
-       if (!ret) {
-               du->cursor_x = crtc_x + du->set_gui_x;
-               du->cursor_y = crtc_y + du->set_gui_y;
-
-               vmw_cursor_update_position(dev_priv, true,
-                                          du->cursor_x + hotspot_x,
-                                          du->cursor_y + hotspot_y);
-       }
-
-out:
-       return ret;
-}
-
-
-int vmw_du_cursor_plane_disable(struct drm_plane *plane)
-{
-       if (plane->fb) {
-               drm_framebuffer_unreference(plane->fb);
-               plane->fb = NULL;
-       }
-
-       return -EINVAL;
-}
-
-
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 {
        vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -472,18 +370,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 }
 
 
-void
-vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-                                  struct drm_plane_state *old_state)
-{
-       struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
-       struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-
-       drm_atomic_set_fb_for_plane(plane->state, NULL);
-       vmw_cursor_update_position(dev_priv, false, 0, 0);
-}
-
-
 void
 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                  struct drm_plane_state *old_state)
@@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
         */
        if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
            dmabuf && only_2d &&
+           mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
            dev_priv->active_display_unit == vmw_du_screen_target) {
                ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
                                              dmabuf, &surface);
index 13f2f1d2818a755012098df126938989ba1fb297..5f8d678ae675156178dc306efc2fc83338c390ba 100644 (file)
@@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
                           u16 *r, u16 *g, u16 *b,
                           uint32_t size,
                           struct drm_modeset_acquire_ctx *ctx);
-int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
-                           uint32_t handle, uint32_t width, uint32_t height,
-                           int32_t hot_x, int32_t hot_y);
-int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
 int vmw_du_connector_set_property(struct drm_connector *connector,
                                  struct drm_property *property,
                                  uint64_t val);
@@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
 /* Universal Plane Helpers */
 void vmw_du_primary_plane_destroy(struct drm_plane *plane);
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
-int vmw_du_cursor_plane_disable(struct drm_plane *plane);
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int crtc_x, int crtc_y,
-                              unsigned int crtc_w,
-                              unsigned int crtc_h,
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h);
 
 /* Atomic Helpers */
 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_plane_state *state);
 void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                       struct drm_plane_state *old_state);
-void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-                                       struct drm_plane_state *old_state);
 int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
                                   struct drm_plane_state *new_state);
 void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
index bad31bdf09b6c1d8bd31663c7973e1ebb912f340..50be1f034f9efa701f2c6feda57fe28d8cf6d596 100644 (file)
@@ -56,6 +56,8 @@ enum stdu_content_type {
  * @right: Right side of bounding box.
  * @top: Top side of bounding box.
  * @bottom: Bottom side of bounding box.
+ * @fb_left: Left side of the framebuffer/content bounding box
+ * @fb_top: Top of the framebuffer/content bounding box
  * @buf: DMA buffer when DMA-ing between buffer and screen targets.
  * @sid: Surface ID when copying between surface and screen targets.
  */
@@ -63,6 +65,7 @@ struct vmw_stdu_dirty {
        struct vmw_kms_dirty base;
        SVGA3dTransferType  transfer;
        s32 left, right, top, bottom;
+       s32 fb_left, fb_top;
        u32 pitch;
        union {
                struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
  *
  * @dirty: The closure structure.
  *
- * This function calculates the bounding box for all the incoming clips
+ * This function calculates the bounding box for all the incoming clips.
  */
 static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 {
@@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 
        dirty->num_hits = 1;
 
-       /* Calculate bounding box */
+       /* Calculate destination bounding box */
        ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
        ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
        ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
        ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+
+       /*
+        * Calculate content bounding box.  We only need the top-left
+        * coordinate because width and height will be the same as the
+        * destination bounding box above.
+        */
+       ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
+       ddirty->fb_top  = min_t(s32, ddirty->fb_top, dirty->fb_y);
 }
 
 
@@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
        src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
        src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
-       src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp;
+       src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
        dst_pitch = ddirty->pitch;
        dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
-       dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp;
+       dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
 
        /* Figure out the real direction */
@@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        }
 
 out_cleanup:
-       ddirty->left = ddirty->top = S32_MAX;
+       ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
        ddirty->right = ddirty->bottom = S32_MIN;
 }
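
The stdu CPU-blit fix above tracks the content-side top-left (fb_left/fb_top) separately from the destination bounding box, because a copy between two rectangles needs independent source and destination offsets; reusing the destination offset on both sides only works when the rectangles happen to coincide. The idea as a standalone blit, one byte per pixel:

    #include <stdio.h>
    #include <string.h>

    /* Copy a w x h block; src and dst each carry their own top-left. */
    static void blit(unsigned char *dst, int dst_pitch, int dst_x, int dst_y,
                     const unsigned char *src, int src_pitch,
                     int src_x, int src_y, int w, int h)
    {
            for (int row = 0; row < h; row++)
                    memcpy(dst + (dst_y + row) * dst_pitch + dst_x,
                           src + (src_y + row) * src_pitch + src_x, w);
    }

    int main(void)
    {
            unsigned char fb[8 * 8] = { 0 }, screen[8 * 8];

            memset(screen, 0xab, sizeof(screen));
            /* Content rect starts at (1,1) in fb, comes from (4,2) on screen. */
            blit(fb, 8, 1, 1, screen, 8, 4, 2, 3, 3);
            printf("fb[1*8+1] = 0x%x\n", fb[1 * 8 + 1]);   /* 0xab */
            return 0;
    }
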
 
@@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
                SVGA3D_READ_HOST_VRAM;
        ddirty.left = ddirty.top = S32_MAX;
        ddirty.right = ddirty.bottom = S32_MIN;
+       ddirty.fb_left = ddirty.fb_top = S32_MAX;
        ddirty.pitch = vfb->base.pitches[0];
        ddirty.buf = buf;
        ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
                DRM_ERROR("Failed to bind surface to STDU.\n");
        else
                crtc->primary->fb = plane->state->fb;
+
+       ret = vmw_stdu_update_st(dev_priv, stdu);
+
+       if (ret)
+               DRM_ERROR("Failed to update STDU.\n");
 }
 
 
index 7681341fe32b8725840d70b137782f5f1f316bc0..6b70bd259953580204ccecd4ec4334c73e73eed7 100644 (file)
@@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint32_t size;
-       uint32_t backup_handle;
+       uint32_t backup_handle = 0;
 
        if (req->multisample_count != 0)
                return -EINVAL;
 
+       if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+               return -EINVAL;
+
        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;
@@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
                                             &res->backup,
                                             &user_srf->backup_base);
-               if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
-                   res->backup_size) {
-                       DRM_ERROR("Surface backup buffer is too small.\n");
-                       vmw_dmabuf_unreference(&res->backup);
-                       ret = -EINVAL;
-                       goto out_unlock;
+               if (ret == 0) {
+                       if (res->backup->base.num_pages * PAGE_SIZE <
+                           res->backup_size) {
+                               DRM_ERROR("Surface backup buffer is too small.\n");
+                               vmw_dmabuf_unreference(&res->backup);
+                               ret = -EINVAL;
+                               goto out_unlock;
+                       } else {
+                               backup_handle = req->buffer_handle;
+                       }
                }
        } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
                                 dev_priv->stdu_max_height);
 
                if (size.width > max_width || size.height > max_height) {
-                       DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u",
+                       DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
                                  size.width, size.height,
                                  max_width, max_height);
                        return -EINVAL;
index 16d556816b5fcaa62758549d9bceaa88bd4bc839..2fb5f432a54c1afd0f7c6104facb1860dbcb3f3a 100644 (file)
@@ -725,15 +725,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
        spin_lock_irqsave(&ipu->lock, flags);
 
        val = ipu_cm_read(ipu, IPU_CONF);
-       if (vdi) {
+       if (vdi)
                val |= IPU_CONF_IC_INPUT;
-       } else {
+       else
                val &= ~IPU_CONF_IC_INPUT;
-               if (csi_id == 1)
-                       val |= IPU_CONF_CSI_SEL;
-               else
-                       val &= ~IPU_CONF_CSI_SEL;
-       }
+
+       if (csi_id == 1)
+               val |= IPU_CONF_CSI_SEL;
+       else
+               val &= ~IPU_CONF_CSI_SEL;
+
        ipu_cm_write(ipu, val, IPU_CONF);
 
        spin_unlock_irqrestore(&ipu->lock, flags);
index c55563379e2e3ca2a1957ce777b2ac2c3586d9b7..c35f74c830657f26a3e29c34f7cef7e9f864f71a 100644 (file)
@@ -131,8 +131,6 @@ int ipu_pre_get(struct ipu_pre *pre)
        if (pre->in_use)
                return -EBUSY;
 
-       clk_prepare_enable(pre->clk_axi);
-
        /* first get the engine out of reset and remove clock gating */
        writel(0, pre->regs + IPU_PRE_CTRL);
 
@@ -149,12 +147,7 @@ int ipu_pre_get(struct ipu_pre *pre)
 
 void ipu_pre_put(struct ipu_pre *pre)
 {
-       u32 val;
-
-       val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
-       writel(val, pre->regs + IPU_PRE_CTRL);
-
-       clk_disable_unprepare(pre->clk_axi);
+       writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
 
        pre->in_use = false;
 }
@@ -249,6 +242,8 @@ static int ipu_pre_probe(struct platform_device *pdev)
        if (!pre->buffer_virt)
                return -ENOMEM;
 
+       clk_prepare_enable(pre->clk_axi);
+
        pre->dev = dev;
        platform_set_drvdata(pdev, pre);
        mutex_lock(&ipu_pre_list_mutex);
@@ -268,6 +263,8 @@ static int ipu_pre_remove(struct platform_device *pdev)
        available_pres--;
        mutex_unlock(&ipu_pre_list_mutex);
 
+       clk_disable_unprepare(pre->clk_axi);
+
        if (pre->buffer_virt)
                gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
                              IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
index fe40e5e499dd4122ce0f623d9df776c0a0cf2bdf..687705c5079422a82b9977e5694d0cd45b2a47bc 100644 (file)
@@ -275,10 +275,12 @@ config HID_EMS_FF
         - Trio Linker Plus II
 
 config HID_ELECOM
-       tristate "ELECOM BM084 bluetooth mouse"
+       tristate "ELECOM HID devices"
        depends on HID
        ---help---
-       Support for the ELECOM BM084 (bluetooth mouse).
+       Support for ELECOM devices:
+         - BM084 Bluetooth Mouse
+         - DEFT Trackball (Wired and wireless)
 
 config HID_ELO
        tristate "ELO USB 4000/4500 touchscreen"
index 16df6cc902359ea620de3f079be796d32be4783a..a6268f2f7408a520660c6add3c734a8006474393 100644 (file)
@@ -69,6 +69,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_IS_MULTITOUCH            BIT(3)
 #define QUIRK_NO_CONSUMER_USAGES       BIT(4)
 #define QUIRK_USE_KBD_BACKLIGHT                BIT(5)
+#define QUIRK_T100_KEYBOARD            BIT(6)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -536,6 +537,8 @@ static void asus_remove(struct hid_device *hdev)
                drvdata->kbd_backlight->removed = true;
                cancel_work_sync(&drvdata->kbd_backlight->work);
        }
+
+       hid_hw_stop(hdev);
 }
 
 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -548,6 +551,12 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
                rdesc[55] = 0xdd;
        }
+       if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
+                *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+               hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
+               rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
+       }
+
        return rdesc;
 }
 
@@ -560,6 +569,9 @@ static const struct hid_device_id asus_devices[] = {
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+               USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+         QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
        { }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
index 37084b6457851ebe0d52361e327db9df07b86b2c..04cee65531d761c18e53775ffc784c3c3d993daa 100644 (file)
@@ -1855,6 +1855,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1891,6 +1892,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
index 6e3848a8d8dd1416a0091ce0e7d263325dd8910b..e2c7465df69f3ae74c2cb1979c531b02e2934089 100644 (file)
@@ -1,10 +1,8 @@
 /*
- *  HID driver for Elecom BM084 (bluetooth mouse).
- *  Removes a non-existing horizontal wheel from
- *  the HID descriptor.
- *  (This module is based on "hid-ortek".)
- *
+ *  HID driver for ELECOM devices.
  *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
+ *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
+ *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
  */
 
 /*
 static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
-               hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
-               rdesc[47] = 0x00;
+       switch (hdev->product) {
+       case USB_DEVICE_ID_ELECOM_BM084:
+               /* The BM084 Bluetooth mouse includes a non-existing horizontal
+                * wheel in the HID descriptor. */
+               if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
+                       hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
+                       rdesc[47] = 0x00;
+               }
+               break;
+       case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
+       case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
+               /* The DEFT trackball has eight buttons, but its descriptor only
+                * reports five, disabling the three Fn buttons on the top of
+                * the mouse.
+                *
+                * Apply the following diff to the descriptor:
+                *
+                * Collection (Physical),              Collection (Physical),
+                *     Report ID (1),                      Report ID (1),
+                *     Report Count (5),           ->      Report Count (8),
+                *     Report Size (1),                    Report Size (1),
+                *     Usage Page (Button),                Usage Page (Button),
+                *     Usage Minimum (01h),                Usage Minimum (01h),
+                *     Usage Maximum (05h),        ->      Usage Maximum (08h),
+                *     Logical Minimum (0),                Logical Minimum (0),
+                *     Logical Maximum (1),                Logical Maximum (1),
+                *     Input (Variable),                   Input (Variable),
+                *     Report Count (1),           ->      Report Count (0),
+                *     Report Size (3),                    Report Size (3),
+                *     Input (Constant),                   Input (Constant),
+                *     Report Size (16),                   Report Size (16),
+                *     Report Count (2),                   Report Count (2),
+                *     Usage Page (Desktop),               Usage Page (Desktop),
+                *     Usage (X),                          Usage (X),
+                *     Usage (Y),                          Usage (Y),
+                *     Logical Minimum (-32768),           Logical Minimum (-32768),
+                *     Logical Maximum (32767),            Logical Maximum (32767),
+                *     Input (Variable, Relative),         Input (Variable, Relative),
+                * End Collection,                     End Collection,
+                */
+               if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
+                       hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+                       rdesc[13] = 8; /* Button/Variable Report Count */
+                       rdesc[21] = 8; /* Button/Variable Usage Maximum */
+                       rdesc[29] = 0; /* Button/Constant Report Count */
+               }
+               break;
        }
        return rdesc;
 }
 
 static const struct hid_device_id elecom_devices[] = {
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)},
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
index 643390ba749d96c5ddb5fbde68fda45c38569c3d..8ca1e8ce0af24e325957526c125ccf55d9081eb8 100644 (file)
 #define USB_VENDOR_ID_ASUSTEK          0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM      0x1726
 #define USB_DEVICE_ID_ASUSTEK_LCM2     0x175b
+#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD    0x17e0
 #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD     0x8585
 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD     0x0101
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRED        0x00fe
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS     0x00ff
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN  0x0004
index 20b40ad2632503754685b84cc07d8787a4a44515..1d6c997b300149269367d00fb5db66b7c2ea25b7 100644 (file)
@@ -349,6 +349,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
 
        if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
                magicmouse_emit_buttons(msc, clicks & 3);
+               input_mt_report_pointer_emulation(input, true);
                input_report_rel(input, REL_X, x);
                input_report_rel(input, REL_Y, y);
        } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -388,16 +389,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
                __clear_bit(BTN_RIGHT, input->keybit);
                __clear_bit(BTN_MIDDLE, input->keybit);
                __set_bit(BTN_MOUSE, input->keybit);
-               __set_bit(BTN_TOOL_FINGER, input->keybit);
-               __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
-               __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
-               __set_bit(BTN_TOOL_QUADTAP, input->keybit);
-               __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
-               __set_bit(BTN_TOUCH, input->keybit);
-               __set_bit(INPUT_PROP_POINTER, input->propbit);
                __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
        }
 
+       __set_bit(BTN_TOOL_FINGER, input->keybit);
+       __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+       __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+       __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+       __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+       __set_bit(BTN_TOUCH, input->keybit);
+       __set_bit(INPUT_PROP_POINTER, input->propbit);
 
        __set_bit(EV_ABS, input->evbit);
 
index 8daa8ce64ebba51e91e57ec801fa7e702fb2a072..fb55fb4c39fcfecaca55c0b8720d28d2f9717678 100644 (file)
@@ -897,6 +897,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        return 0;
 }
 
+static void i2c_hid_acpi_fix_up_power(struct device *dev)
+{
+       acpi_handle handle = ACPI_HANDLE(dev);
+       struct acpi_device *adev;
+
+       if (handle && acpi_bus_get_device(handle, &adev) == 0)
+               acpi_device_fix_up_power(adev);
+}
+
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
        {"ACPI0C50", 0 },
        {"PNP0C50", 0 },
@@ -909,6 +918,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
 {
        return -ENODEV;
 }
+
+static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
 #endif
 
 #ifdef CONFIG_OF
@@ -1030,6 +1041,8 @@ static int i2c_hid_probe(struct i2c_client *client,
        if (ret < 0)
                goto err_regulator;
 
+       i2c_hid_acpi_fix_up_power(&client->dev);
+
        pm_runtime_get_noresume(&client->dev);
        pm_runtime_set_active(&client->dev);
        pm_runtime_enable(&client->dev);
index 4b225fb19a16842f635026d1b1023d5d1cf5068e..e274c9dc32f3a211d97d02f2b6477f20c9121fac 100644 (file)
@@ -1571,37 +1571,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
 {
        unsigned char *data = wacom->data;
 
-       if (wacom->pen_input)
+       if (wacom->pen_input) {
                dev_dbg(wacom->pen_input->dev.parent,
                        "%s: received report #%d\n", __func__, data[0]);
-       else if (wacom->touch_input)
+
+               if (len == WACOM_PKGLEN_PENABLED ||
+                   data[0] == WACOM_REPORT_PENABLED)
+                       return wacom_tpc_pen(wacom);
+       }
+       else if (wacom->touch_input) {
                dev_dbg(wacom->touch_input->dev.parent,
                        "%s: received report #%d\n", __func__, data[0]);
 
-       switch (len) {
-       case WACOM_PKGLEN_TPC1FG:
-               return wacom_tpc_single_touch(wacom, len);
+               switch (len) {
+               case WACOM_PKGLEN_TPC1FG:
+                       return wacom_tpc_single_touch(wacom, len);
 
-       case WACOM_PKGLEN_TPC2FG:
-               return wacom_tpc_mt_touch(wacom);
+               case WACOM_PKGLEN_TPC2FG:
+                       return wacom_tpc_mt_touch(wacom);
 
-       case WACOM_PKGLEN_PENABLED:
-               return wacom_tpc_pen(wacom);
+               default:
+                       switch (data[0]) {
+                       case WACOM_REPORT_TPC1FG:
+                       case WACOM_REPORT_TPCHID:
+                       case WACOM_REPORT_TPCST:
+                       case WACOM_REPORT_TPC1FGE:
+                               return wacom_tpc_single_touch(wacom, len);
 
-       default:
-               switch (data[0]) {
-               case WACOM_REPORT_TPC1FG:
-               case WACOM_REPORT_TPCHID:
-               case WACOM_REPORT_TPCST:
-               case WACOM_REPORT_TPC1FGE:
-                       return wacom_tpc_single_touch(wacom, len);
-
-               case WACOM_REPORT_TPCMT:
-               case WACOM_REPORT_TPCMT2:
-                       return wacom_mt_touch(wacom);
+                       case WACOM_REPORT_TPCMT:
+                       case WACOM_REPORT_TPCMT2:
+                               return wacom_mt_touch(wacom);
 
-               case WACOM_REPORT_PENABLED:
-                       return wacom_tpc_pen(wacom);
+                       }
                }
        }
 
index 22d5eafd681541374817314a1a2db164bb235ce7..5ef2814345ef7f37aa47fd47b8765ce6cc7fad1b 100644 (file)
@@ -343,6 +343,7 @@ config SENSORS_ASB100
 
 config SENSORS_ASPEED
        tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
+       select REGMAP
        help
          This driver provides support for ASPEED AST2400/AST2500 PWM
          and Fan Tacho controllers.
index 48403a2115beb68df404766350cfa844974a55e3..9de13d626c6896d379da385ab48da29258e5f3ee 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/errno.h>
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/hwmon.h>
@@ -494,7 +495,7 @@ static u32 aspeed_get_fan_tach_ch_measure_period(struct aspeed_pwm_tacho_data
        return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit);
 }
 
-static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
                                      u8 fan_tach_ch)
 {
        u32 raw_data, tach_div, clk_source, sec, val;
@@ -510,6 +511,9 @@ static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
        msleep(sec);
 
        regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val);
+       if (!(val & RESULT_STATUS_MASK))
+               return -ETIMEDOUT;
+
        raw_data = val & RESULT_VALUE_MASK;
        tach_div = priv->type_fan_tach_clock_division[type];
        tach_div = 0x4 << (tach_div * 2);
@@ -561,12 +565,14 @@ static ssize_t show_rpm(struct device *dev, struct device_attribute *attr,
 {
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
        int index = sensor_attr->index;
-       u32 rpm;
+       int rpm;
        struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
 
        rpm = aspeed_get_fan_tach_ch_rpm(priv, index);
+       if (rpm < 0)
+               return rpm;
 
-       return sprintf(buf, "%u\n", rpm);
+       return sprintf(buf, "%d\n", rpm);
 }
 
 static umode_t pwm_is_visible(struct kobject *kobj,
@@ -591,24 +597,23 @@ static umode_t fan_dev_is_visible(struct kobject *kobj,
        return a->mode;
 }
 
-static SENSOR_DEVICE_ATTR(pwm0, 0644,
-                       show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm1, 0644,
-                       show_pwm, set_pwm, 1);
+                       show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, 0644,
-                       show_pwm, set_pwm, 2);
+                       show_pwm, set_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, 0644,
-                       show_pwm, set_pwm, 3);
+                       show_pwm, set_pwm, 2);
 static SENSOR_DEVICE_ATTR(pwm4, 0644,
-                       show_pwm, set_pwm, 4);
+                       show_pwm, set_pwm, 3);
 static SENSOR_DEVICE_ATTR(pwm5, 0644,
-                       show_pwm, set_pwm, 5);
+                       show_pwm, set_pwm, 4);
 static SENSOR_DEVICE_ATTR(pwm6, 0644,
-                       show_pwm, set_pwm, 6);
+                       show_pwm, set_pwm, 5);
 static SENSOR_DEVICE_ATTR(pwm7, 0644,
+                       show_pwm, set_pwm, 6);
+static SENSOR_DEVICE_ATTR(pwm8, 0644,
                        show_pwm, set_pwm, 7);
 static struct attribute *pwm_dev_attrs[] = {
-       &sensor_dev_attr_pwm0.dev_attr.attr,
        &sensor_dev_attr_pwm1.dev_attr.attr,
        &sensor_dev_attr_pwm2.dev_attr.attr,
        &sensor_dev_attr_pwm3.dev_attr.attr,
@@ -616,6 +621,7 @@ static struct attribute *pwm_dev_attrs[] = {
        &sensor_dev_attr_pwm5.dev_attr.attr,
        &sensor_dev_attr_pwm6.dev_attr.attr,
        &sensor_dev_attr_pwm7.dev_attr.attr,
+       &sensor_dev_attr_pwm8.dev_attr.attr,
        NULL,
 };
 
@@ -624,40 +630,39 @@ static const struct attribute_group pwm_dev_group = {
        .is_visible = pwm_is_visible,
 };
 
-static SENSOR_DEVICE_ATTR(fan0_input, 0444,
-               show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan1_input, 0444,
-               show_rpm, NULL, 1);
+               show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan2_input, 0444,
-               show_rpm, NULL, 2);
+               show_rpm, NULL, 1);
 static SENSOR_DEVICE_ATTR(fan3_input, 0444,
-               show_rpm, NULL, 3);
+               show_rpm, NULL, 2);
 static SENSOR_DEVICE_ATTR(fan4_input, 0444,
-               show_rpm, NULL, 4);
+               show_rpm, NULL, 3);
 static SENSOR_DEVICE_ATTR(fan5_input, 0444,
-               show_rpm, NULL, 5);
+               show_rpm, NULL, 4);
 static SENSOR_DEVICE_ATTR(fan6_input, 0444,
-               show_rpm, NULL, 6);
+               show_rpm, NULL, 5);
 static SENSOR_DEVICE_ATTR(fan7_input, 0444,
-               show_rpm, NULL, 7);
+               show_rpm, NULL, 6);
 static SENSOR_DEVICE_ATTR(fan8_input, 0444,
-               show_rpm, NULL, 8);
+               show_rpm, NULL, 7);
 static SENSOR_DEVICE_ATTR(fan9_input, 0444,
-               show_rpm, NULL, 9);
+               show_rpm, NULL, 8);
 static SENSOR_DEVICE_ATTR(fan10_input, 0444,
-               show_rpm, NULL, 10);
+               show_rpm, NULL, 9);
 static SENSOR_DEVICE_ATTR(fan11_input, 0444,
-               show_rpm, NULL, 11);
+               show_rpm, NULL, 10);
 static SENSOR_DEVICE_ATTR(fan12_input, 0444,
-               show_rpm, NULL, 12);
+               show_rpm, NULL, 11);
 static SENSOR_DEVICE_ATTR(fan13_input, 0444,
-               show_rpm, NULL, 13);
+               show_rpm, NULL, 12);
 static SENSOR_DEVICE_ATTR(fan14_input, 0444,
-               show_rpm, NULL, 14);
+               show_rpm, NULL, 13);
 static SENSOR_DEVICE_ATTR(fan15_input, 0444,
+               show_rpm, NULL, 14);
+static SENSOR_DEVICE_ATTR(fan16_input, 0444,
                show_rpm, NULL, 15);
 static struct attribute *fan_dev_attrs[] = {
-       &sensor_dev_attr_fan0_input.dev_attr.attr,
        &sensor_dev_attr_fan1_input.dev_attr.attr,
        &sensor_dev_attr_fan2_input.dev_attr.attr,
        &sensor_dev_attr_fan3_input.dev_attr.attr,
@@ -673,6 +678,7 @@ static struct attribute *fan_dev_attrs[] = {
        &sensor_dev_attr_fan13_input.dev_attr.attr,
        &sensor_dev_attr_fan14_input.dev_attr.attr,
        &sensor_dev_attr_fan15_input.dev_attr.attr,
+       &sensor_dev_attr_fan16_input.dev_attr.attr,
        NULL
 };
 
@@ -802,7 +808,6 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
                if (ret)
                        return ret;
        }
-       of_node_put(np);
 
        priv->groups[0] = &pwm_dev_group;
        priv->groups[1] = &fan_dev_group;
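
Two conventions drive the aspeed-pwm-tacho changes: hwmon sysfs names are 1-based (fan1_input, pwm1), hence the attribute renumbering, and a sysfs show() callback may return a negative errno, which the VFS hands back to the reading process, hence the int return type for the RPM helper. The error-propagation pattern in isolation (read_hw_value is a hypothetical stand-in):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int val = read_hw_value(dev);	/* may return -ETIMEDOUT */

	if (val < 0)
		return val;		/* read(2) fails with this errno */

	return sprintf(buf, "%d\n", val);
}
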
index 6283b99d2b17f8ec3e22e4d4b0c08dad78d15d2b..d1263b82d646a1524697b38e2f63b31bd14f153d 100644 (file)
@@ -94,9 +94,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
 static int dw_i2c_acpi_configure(struct platform_device *pdev)
 {
        struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+       u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0;
        acpi_handle handle = ACPI_HANDLE(&pdev->dev);
        const struct acpi_device_id *id;
-       u32 ss_ht, fp_ht, hs_ht, fs_ht;
        struct acpi_device *adev;
        const char *uid;
 
index 0ed77eeff31e661fe5a4c6b8426c7d2cd6a10f89..a2e3dd715380c74397a5cf2c1a778786eb12826b 100644 (file)
@@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd,
                    int value, int index, void *data, int len)
 {
        struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+       void *dmadata = kmalloc(len, GFP_KERNEL);
+       int ret;
+
+       if (!dmadata)
+               return -ENOMEM;
 
        /* do control transfer */
-       return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
+       ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
                               cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
-                              USB_DIR_IN, value, index, data, len, 2000);
+                              USB_DIR_IN, value, index, dmadata, len, 2000);
+
+       memcpy(data, dmadata, len);
+       kfree(dmadata);
+       return ret;
 }
 
 static int usb_write(struct i2c_adapter *adapter, int cmd,
                     int value, int index, void *data, int len)
 {
        struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+       void *dmadata = kmemdup(data, len, GFP_KERNEL);
+       int ret;
+
+       if (!dmadata)
+               return -ENOMEM;
 
        /* do control transfer */
-       return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
+       ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
                               cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-                              value, index, data, len, 2000);
+                              value, index, dmadata, len, 2000);
+
+       kfree(dmadata);
+       return ret;
 }
 
 static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
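
This rework enforces the USB rule that transfer buffers handed to usb_control_msg() must be DMA-capable, i.e. heap memory from kmalloc()/kmemdup(), never stack or vmalloc memory. Reduced to its core, the bounce-buffer idiom looks like this (variable names illustrative):

	void *dmadata = kmemdup(caller_buf, len, GFP_KERNEL);
	int ret;

	if (!dmadata)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), cmd,
			      USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			      value, index, dmadata, len, 2000);
	kfree(dmadata);
	return ret;

For reads the copy runs the other way: receive into the bounce buffer, then memcpy() the result back to the caller's buffer before freeing.
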
index 1844770f3ae838bd63f3837bc0f475b0c8b5f1b1..2b4d613a347491295a51932699264c6edcd4bb51 100644 (file)
@@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
-       sa_path_set_service_id(primary_path, req_msg->service_id);
+       primary_path->service_id = req_msg->service_id;
 
        if (req_msg->alt_local_lid) {
                alt_path->dgid = req_msg->alt_local_gid;
@@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
-               sa_path_set_service_id(alt_path, req_msg->service_id);
+               alt_path->service_id = req_msg->service_id;
        }
 }
 
index 91b7a2fe5a55488ce1e5bb886ba19023bd624328..31bb82d8ecd7f19bbee90bd95a83cec7fe5abca7 100644 (file)
@@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
                        ib->sib_pkey = path->pkey;
                        ib->sib_flowinfo = path->flow_label;
                        memcpy(&ib->sib_addr, &path->sgid, 16);
-                       ib->sib_sid = sa_path_get_service_id(path);
+                       ib->sib_sid = path->service_id;
                        ib->sib_scope_id = 0;
                } else {
                        ib->sib_pkey = listen_ib->sib_pkey;
@@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
                memcpy(&req->local_gid, &req_param->primary_path->sgid,
                       sizeof(req->local_gid));
                req->has_gid    = true;
-               req->service_id =
-                       sa_path_get_service_id(req_param->primary_path);
+               req->service_id = req_param->primary_path->service_id;
                req->pkey       = be16_to_cpu(req_param->primary_path->pkey);
                if (req->pkey != req_param->bth_pkey)
                        pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
@@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
        struct rdma_route *rt;
        const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
        struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
-       const __be64 service_id = sa_path_get_service_id(path);
+       const __be64 service_id =
+               ib_event->param.req_rcvd.primary_path->service_id;
        int ret;
 
        id = rdma_create_id(listen_id->route.addr.dev_addr.net,
@@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
        path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
        path_rec.numb_path = 1;
        path_rec.reversible = 1;
-       sa_path_set_service_id(&path_rec,
-                              rdma_get_service_id(&id_priv->id,
-                                                  cma_dst_addr(id_priv)));
+       path_rec.service_id = rdma_get_service_id(&id_priv->id,
+                                                 cma_dst_addr(id_priv));
 
        comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
                    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
index cb7d372e4bdf877206d8da9141d46a590395bcd9..d92ab4eaa8f311a44bc3854107d9b7f352c0bca7 100644 (file)
@@ -169,6 +169,16 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+int ibnl_init(void);
+void ibnl_cleanup(void);
+
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 if the group has listeners, or a negative value if it has none.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
                              struct netlink_callback *cb);
 int ib_nl_handle_set_timeout(struct sk_buff *skb,
index b784055423c8346b80e00f98d58f6499e7adc8ef..94931c474d41db72b606f654cb0d334087600ee5 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
+#include "core_priv.h"
 
 struct ibnl_client {
        struct list_head                list;
@@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group)
                return -1;
        return 0;
 }
-EXPORT_SYMBOL(ibnl_chk_listeners);
 
 int ibnl_add_client(int index, int nops,
                    const struct ibnl_client_cbs cb_table[])
index e335b09c022ef69c58db15e8c37a43bfce9e8a42..fb7aec4047c8be90d0b70c39f66fdf6606c6fd7e 100644 (file)
@@ -194,7 +194,7 @@ static u32 tid;
        .field_name          = "sa_path_rec:" #field
 
 static const struct ib_field path_rec_table[] = {
-       { PATH_REC_FIELD(ib.service_id),
+       { PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
@@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = {
        .field_name          = "sa_path_rec:" #field
 
 static const struct ib_field opa_path_rec_table[] = {
-       { OPA_PATH_REC_FIELD(opa.service_id),
+       { OPA_PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
@@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 
        /* Now build the attributes */
        if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
-               val64 = be64_to_cpu(sa_path_get_service_id(sa_rec));
+               val64 = be64_to_cpu(sa_rec->service_id);
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
                        sizeof(val64), &val64);
        }
index 3dbf811d3c517232e4ebbff5f6970d34d8f518ca..21e60b1e2ff41b1c27e98ebad68e5f4b0ccb7f42 100644 (file)
@@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
        for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
 
                page = sg_page(sg);
-               if (umem->writable && dirty)
+               if (!PageDirty(page) && umem->writable && dirty)
                        set_page_dirty_lock(page);
                put_page(page);
        }
index 0780b1afefa9d996c870e6982bfc9c5f9b13fb2a..8c4ec564e49583f6d05eab8df5e57e4378582f08 100644 (file)
@@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
                struct vm_area_struct *vma;
                struct hstate *h;
 
+               down_read(&mm->mmap_sem);
                vma = find_vma(mm, ib_umem_start(umem));
-               if (!vma || !is_vm_hugetlb_page(vma))
+               if (!vma || !is_vm_hugetlb_page(vma)) {
+                       up_read(&mm->mmap_sem);
                        return -EINVAL;
+               }
                h = hstate_vma(vma);
                umem->page_shift = huge_page_shift(h);
+               up_read(&mm->mmap_sem);
                umem->hugetlb = 1;
        } else {
                umem->hugetlb = 0;
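
The locking fix reflects a hard mm rule: find_vma() may only be called with mmap_sem held for reading, and the returned vma is only stable while the lock is held, so any field needed from it must be copied out before unlocking. In outline (names illustrative):

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		shift = huge_page_shift(hstate_vma(vma)); /* copy out under lock */
	up_read(&mm->mmap_sem);
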
index 8b9587fe23033fd69708dd60c7e76b18eb2e23ff..94fd989c90600b2640d2fb36f2397c9814160583 100644 (file)
@@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 }
 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
 
-void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
-                               struct sa_path_rec *src)
+static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
+                                      struct sa_path_rec *src)
 {
-       memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid);
-       memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid);
+       memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
+       memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
 
        dst->dlid               = htons(ntohl(sa_path_get_dlid(src)));
        dst->slid               = htons(ntohl(sa_path_get_slid(src)));
index b6fe45924c6ed5a3a835c89f00b9265a940e57ef..0910faf3587b547e873bc4e5572e7defd93623b3 100644 (file)
@@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        release_ep_resources(ep);
+       kfree_skb(skb);
        return 0;
 }
 
@@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        c4iw_put_ep(&ep->parent_ep->com);
        release_ep_resources(ep);
+       kfree_skb(skb);
        return 0;
 }
 
@@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 
        pr_debug("%s rdev %p\n", __func__, rdev);
        req->cmd = CPL_ABORT_NO_RST;
+       skb_get(skb);
        ret = c4iw_ofld_send(rdev, skb);
        if (ret) {
                __state_set(&ep->com, DEAD);
                queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
-       }
+       } else
+               kfree_skb(skb);
 }
 
 static int send_flowc(struct c4iw_ep *ep)
@@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
 
-       hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+       hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+              sizeof(struct tcphdr) +
               ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
        if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
                child_ep->mtu = peer_mss + hdrs;
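
The abort_arp_failure() change is the usual sk_buff ownership dance: APIs like c4iw_ofld_send() consume the skb they are given, so a caller that may still need the buffer must take its own reference first and drop it once the outcome is known. As a sketch (the send and requeue calls stand in for the driver's own):

	skb_get(skb);			/* extra reference, skb survives the send */
	ret = consuming_send(dev, skb);	/* callee frees its reference */
	if (ret)
		requeue_for_retry(skb);	/* our reference keeps it alive */
	else
		kfree_skb(skb);		/* success: drop our reference */
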
index 329fb65e8fb0edfafd19d6fbea17196f810f29d5..f96a96dbcf1ff4e40b75de36122a3efd0405faae 100644 (file)
@@ -971,7 +971,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
                 devp->rdev.lldi.sge_egrstatuspagesize);
 
        devp->rdev.hw_queue.t4_eq_status_entries =
-               devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+               devp->rdev.lldi.sge_egrstatuspagesize / 64;
        devp->rdev.hw_queue.t4_max_eq_size = 65520;
        devp->rdev.hw_queue.t4_max_iq_size = 65520;
        devp->rdev.hw_queue.t4_max_rq_size = 8192 -
index 5d6b1eeaa9a0a14c1088655cd5f049b5d3defa91..2ba00b89df6a046bba536cfe889c373d9063ced0 100644 (file)
@@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd)
        }
 }
 
-static void write_global_credit(struct hfi1_devdata *dd,
-                               u8 vau, u16 total, u16 shared)
+/*
+ * Set up allocation unit value.
+ */
+void set_up_vau(struct hfi1_devdata *dd, u8 vau)
 {
-       write_csr(dd, SEND_CM_GLOBAL_CREDIT,
-                 ((u64)total <<
-                  SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
-                 ((u64)shared <<
-                  SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
-                 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
+       u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+       /* do not modify other values in the register */
+       reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
+       reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
+       write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 }
 
 /*
  * Set up initial VL15 credits of the remote.  Assumes the rest of
- * the CM credit registers are zero from a previous global or credit reset .
+ * the CM credit registers are zero from a previous global or credit reset.
+ * Shared limit for VL15 will always be 0.
  */
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
 {
-       /* leave shared count at zero for both global and VL15 */
-       write_global_credit(dd, vau, vl15buf, 0);
+       u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+       /* set initial values for total and shared credit limit */
+       reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
+                SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
+
+       /*
+        * Set total limit to be equal to VL15 credits.
+        * Leave shared limit at 0.
+        */
+       reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
+       write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 
        write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
                  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
@@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd)
        for (i = 0; i < TXE_NUM_DATA_VL; i++)
                write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
        write_csr(dd, SEND_CM_CREDIT_VL15, 0);
-       write_global_credit(dd, 0, 0, 0);
+       write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
        /* reset the CM block */
        pio_send_control(dd, PSC_CM_RESET);
+       /* reset cached value */
+       dd->vl15buf_cached = 0;
 }
 
 /* convert a vCU to a CU */
@@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work)
 {
        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
                                                  link_up_work);
+       struct hfi1_devdata *dd = ppd->dd;
+
        set_link_state(ppd, HLS_UP_INIT);
 
        /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
-       read_ltp_rtt(ppd->dd);
+       read_ltp_rtt(dd);
        /*
         * OPA specifies that certain counters are cleared on a transition
         * to link up, so do that.
         */
-       clear_linkup_counters(ppd->dd);
+       clear_linkup_counters(dd);
        /*
         * And (re)set link up default values.
         */
        set_linkup_defaults(ppd);
 
+       /*
+        * Set VL15 credits. Use cached value from verify cap interrupt.
+        * In case of quick linkup or simulator, vl15 value will be set by
+        * handle_linkup_change. VerifyCap interrupt handler will not be
+        * called in those scenarios.
+        */
+       if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
+               set_up_vl15(dd, dd->vl15buf_cached);
+
        /* enforce link speed enabled */
        if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
                /* oops - current speed is not enabled, bounce */
-               dd_dev_err(ppd->dd,
+               dd_dev_err(dd,
                           "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
                           ppd->link_speed_active, ppd->link_speed_enabled);
                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
@@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work)
         */
        if (vau == 0)
                vau = 1;
-       set_up_vl15(dd, vau, vl15buf);
+       set_up_vau(dd, vau);
+
+       /*
+        * Set VL15 credits to 0 in global credit register. Cache remote VL15
+        * credits value and wait for link-up interrupt to set it.
+        */
+       set_up_vl15(dd, 0);
+       dd->vl15buf_cached = vl15buf;
 
        /* set up the LCB CRC mode */
        crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
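
Both new helpers update a single field of SEND_CM_GLOBAL_CREDIT without disturbing the rest of the register, the standard read-modify-write CSR idiom; with a hypothetical register and field it reads:

	u64 reg = read_csr(dd, EXAMPLE_CSR);

	reg &= ~EXAMPLE_FIELD_SMASK;		/* clear only this field */
	reg |= (u64)val << EXAMPLE_FIELD_SHIFT;	/* write the new value */
	write_csr(dd, EXAMPLE_CSR, reg);
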
index 5bfa839d1c48bc14939f680e74b696ac43105119..793514f1d15fb4a82357ba755dd97c7855f076ad 100644 (file)
 #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
 #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
 #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
+#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull
 #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
+#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull
 #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
index da322e6668cc5893262c3316486147cac6df1ca9..414a04a481c2abab3b3fe44d3949d591e684c065 100644 (file)
@@ -1045,6 +1045,14 @@ struct hfi1_devdata {
        /* initial vl15 credits to use */
        u16 vl15_init;
 
+       /*
+        * Cached value for vl15buf, read during verify cap interrupt. VL15
+        * credits are to be kept at 0 and set when handling the link-up
+        * interrupt. This removes the possibility of receiving VL15 MAD
+        * packets before this HFI is ready.
+        */
+       u16 vl15buf_cached;
+
        /* Misc small ints */
        u8 n_krcv_queues;
        u8 qos_shift;
@@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
 
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
+void set_up_vau(struct hfi1_devdata *dd, u8 vau);
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
 void reset_link_credits(struct hfi1_devdata *dd);
 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
 
index ba265d0ae93b4a96d6c29364f30b34c6046c335b..04a5082d5ac55259d992db8843cd361797b2992c 100644 (file)
@@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
                 * the remote values.  Both sides must be using the values.
                 */
                if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
-                       set_up_vl15(dd, dd->vau, dd->vl15_init);
+                       set_up_vau(dd, dd->vau);
+                       set_up_vl15(dd, dd->vl15_init);
                        assign_remote_cm_au_table(dd, dd->vcu);
                }
 
index 93faf86d54b620fb2cc621844932d811c17868dd..6a9f6f9819e1a326b0ed5f15f831d10cec2620a6 100644 (file)
@@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
        /*
         * Save BARs and command to rewrite after device reset.
         */
-       dd->pcibar0 = addr;
-       dd->pcibar1 = addr >> 32;
+       pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0);
+       pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1);
        pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
        pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
        pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
index 069bdaf061ab923cbc8b123ab182806fdb3c4dac..1080778a1f7c4a38816ce02058f63baae862d89e 100644 (file)
@@ -2159,8 +2159,11 @@ send_last:
                ret = hfi1_rvt_get_rwqe(qp, 1);
                if (ret < 0)
                        goto nack_op_err;
-               if (!ret)
+               if (!ret) {
+                       /* peer will send again */
+                       rvt_put_ss(&qp->r_sge);
                        goto rnr_nak;
+               }
                wc.ex.imm_data = ohdr->u.rc.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                goto send_last;
index 50d140d25e38fa3cce8b4512b86099c633f1bc6c..2f3bbcac1e3492ff6cfeb5d92ebef73bda8b59fd 100644 (file)
@@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = {
 };
 
 static struct attribute *port_cc_default_attributes[] = {
-       &cc_prescan_attr.attr
+       &cc_prescan_attr.attr,
+       NULL
 };
 
 static struct kobj_type port_cc_ktype = {
index f3bc01bce483fe5d81ba08e2a60212e4f0f32b98..6ae98aa7f74ebb14f4ce2e9e3cee7c629270253f 100644 (file)
@@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
        }
 
        ctrl_ird |= IETF_PEER_TO_PEER;
-       ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
        switch (mpa_key) {
        case MPA_KEY_REQUEST:
@@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
                } else {
                        type = I40IW_CM_EVENT_CONNECTED;
                        cm_node->state = I40IW_CM_STATE_OFFLOADED;
-                       i40iw_send_ack(cm_node);
                }
+               i40iw_send_ack(cm_node);
                break;
        default:
                pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
index f82483b3d1e7dc205fa2f6a5e8037fd4d6866eab..a027e2072477aef12a230fdc79a1a1d6668bf15c 100644 (file)
@@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
        struct i40iw_sc_dev *dev = vsi->dev;
        struct i40iw_sc_qp *qp = NULL;
        bool qs_handle_change = false;
-       bool mss_change = false;
        unsigned long flags;
        u16 qs_handle;
        int i;
 
-       if (vsi->mss != l2params->mss) {
-               mss_change = true;
-               vsi->mss = l2params->mss;
-       }
+       vsi->mss = l2params->mss;
 
        i40iw_fill_qos_list(l2params->qs_handle_list);
        for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
                qs_handle = l2params->qs_handle_list[i];
                if (vsi->qos[i].qs_handle != qs_handle)
                        qs_handle_change = true;
-               else if (!mss_change)
-                       continue;       /* no MSS nor qs handle change */
                spin_lock_irqsave(&vsi->qos[i].lock, flags);
                qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
                while (qp) {
-                       if (mss_change)
-                               i40iw_qp_mss_modify(dev, qp);
                        if (qs_handle_change) {
                                qp->qs_handle = qs_handle;
                                /* issue cqp suspend command */
@@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
 
        set_64bit_val(wqe,
                      8,
-                     LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
                      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
 
        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
@@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
                 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
                 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
                 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
-                LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
                 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
                 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
                 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
index 2728af3103ce9ae285ab8c0b88980e716a1cb5e4..a3f18a22f5ed1787794031eb8a1765ab2804cdc4 100644 (file)
@@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
        status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
                                       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
        if (status)
-               goto exit;
+               goto error;
        info.fpm_query_buf_pa = mem.pa;
        info.fpm_query_buf = mem.va;
        status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
                                       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
        if (status)
-               goto exit;
+               goto error;
        info.fpm_commit_buf_pa = mem.pa;
        info.fpm_commit_buf = mem.va;
        info.hmc_fn_id = ldev->fid;
@@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
        info.exception_lan_queue = 1;
        info.vchnl_send = i40iw_virtchnl_send;
        status = i40iw_device_init(&iwdev->sc_dev, &info);
-exit:
-       if (status) {
-               kfree(iwdev->hmc_info_mem);
-               iwdev->hmc_info_mem = NULL;
-       }
+
+       if (status)
+               goto error;
        memset(&vsi_info, 0, sizeof(vsi_info));
        vsi_info.dev = &iwdev->sc_dev;
        vsi_info.back_vsi = (void *)iwdev;
@@ -1362,11 +1360,19 @@ exit:
                memset(&stats_info, 0, sizeof(stats_info));
                stats_info.fcn_id = ldev->fid;
                stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+               if (!stats_info.pestat) {
+                       status = I40IW_ERR_NO_MEMORY;
+                       goto error;
+               }
                stats_info.stats_initialize = true;
                if (stats_info.pestat)
                        i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
        }
        return status;
+error:
+       kfree(iwdev->hmc_info_mem);
+       iwdev->hmc_info_mem = NULL;
+       return status;
 }
 
 /**
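
The i40iw_initialize_dev() changes above consolidate all failure paths behind one error label, the kernel's standard unwind idiom for multi-step initialization; each step bails to the same cleanup instead of duplicating it (sketch, illustrative step names):

	status = step_one(iwdev);
	if (status)
		goto error;

	status = step_two(iwdev);
	if (status)
		goto error;

	return 0;

error:
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
	return status;
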
index aa66c1c63dfa4b0879eecfdb4a46f58f3279de98..f27be3e7830bb438f5543d88ef35eae1c4d93586 100644 (file)
@@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
                            struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
 void *i40iw_remove_head(struct list_head *list);
 void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
 
 void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
 void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
index 7b76259752b0062e5cf16f7bc097f5cd4b66098e..959ec81fba99ca6499f2e8d7d68f0cb073c86646 100644 (file)
@@ -541,7 +541,6 @@ struct i40iw_create_qp_info {
 struct i40iw_modify_qp_info {
        u64 rx_win0;
        u64 rx_win1;
-       u16 new_mss;
        u8 next_iwarp_state;
        u8 termlen;
        bool ord_valid;
@@ -554,7 +553,6 @@ struct i40iw_modify_qp_info {
        bool dont_send_term;
        bool dont_send_fin;
        bool cached_var_valid;
-       bool mss_change;
        bool force_loopback;
 };
 
index 409a3781e735db6f2072bde350815beed380425e..56d986924a4c1708216684f776f4705451a65f79 100644 (file)
@@ -756,23 +756,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b
                i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
 }
 
-/**
- * i40iw_qp_mss_modify - modify mss for qp
- * @dev: hardware control device structure
- * @qp: hardware control qp
- */
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
-{
-       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-       struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
-       struct i40iw_modify_qp_info info;
-
-       memset(&info, 0, sizeof(info));
-       info.mss_change = true;
-       info.new_mss = qp->vsi->mss;
-       i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
-}
-
 /**
  * i40iw_term_modify_qp - modify qp for term message
  * @qp: hardware control qp
index f4d13683a403a6369c61d40d0a19cc4e5124334d..48fd327f876b08b5b246b41425ff997e64cf861d 100644 (file)
@@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
        if (!dev->vchnl_up)
                return I40IW_ERR_NOT_READY;
        if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
-               if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0)
-                       vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
-               else
-                       vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+               vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
                return I40IW_SUCCESS;
        }
        for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
index b4694717f6f301f4a2d6b407f18b2c42614ee24e..21d31cb1325f5fc0271f093e5ec069641b8b9ffe 100644 (file)
@@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
        if (port < 0)
                return;
        ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+       ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
 
        mlx4_ib_query_ah(&ah.ibah, &ah_attr);
        if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
index d45772da09635c2164f4cef8bcf5255c17fe8cff..0c79983c8b1a0a4e6189fb5e02439ff3522a9098 100644 (file)
@@ -2979,6 +2979,18 @@ error_0:
        return ret;
 }
 
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+       switch (umr_fence_cap) {
+       case MLX5_CAP_UMR_FENCE_NONE:
+               return MLX5_FENCE_MODE_NONE;
+       case MLX5_CAP_UMR_FENCE_SMALL:
+               return MLX5_FENCE_MODE_INITIATOR_SMALL;
+       default:
+               return MLX5_FENCE_MODE_STRONG_ORDERING;
+       }
+}
+
 static int create_dev_resources(struct mlx5_ib_resources *devr)
 {
        struct ib_srq_init_attr attr;
@@ -3693,6 +3705,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
        mlx5_ib_internal_fill_odp_caps(dev);
 
+       dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                dev->ib_dev.alloc_mw            = mlx5_ib_alloc_mw;
                dev->ib_dev.dealloc_mw          = mlx5_ib_dealloc_mw;
index 38c877bc45e592dbbe9670b3c9b624b90b6df30d..bdcf25410c99df7f57e280f2672eb86b8e3205fd 100644 (file)
@@ -349,7 +349,7 @@ struct mlx5_ib_qp {
        struct mlx5_ib_wq       rq;
 
        u8                      sq_signal_bits;
-       u8                      fm_cache;
+       u8                      next_fence;
        struct mlx5_ib_wq       sq;
 
        /* serialize qp state modifications
@@ -654,6 +654,7 @@ struct mlx5_ib_dev {
        struct mlx5_ib_port     *port;
        struct mlx5_sq_bfreg     bfreg;
        struct mlx5_sq_bfreg     fp_bfreg;
+       u8                              umr_fence;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
index 93959e1e43a3da5e2f71a89c8c12bdc61368e3e0..ebb6768684de372d755cff4c6f92c768edb9045b 100644 (file)
@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
        }
 }
 
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
-{
-       if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
-                    wr->send_flags & IB_SEND_FENCE))
-               return MLX5_FENCE_MODE_STRONG_ORDERING;
-
-       if (unlikely(fence)) {
-               if (wr->send_flags & IB_SEND_FENCE)
-                       return MLX5_FENCE_MODE_SMALL_AND_FENCE;
-               else
-                       return fence;
-       } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
-               return MLX5_FENCE_MODE_FENCE;
-       }
-
-       return 0;
-}
-
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                     struct mlx5_wqe_ctrl_seg **ctrl,
                     struct ib_send_wr *wr, unsigned *idx,
@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 static void finish_wqe(struct mlx5_ib_qp *qp,
                       struct mlx5_wqe_ctrl_seg *ctrl,
                       u8 size, unsigned idx, u64 wr_id,
-                      int nreq, u8 fence, u8 next_fence,
-                      u32 mlx5_opcode)
+                      int nreq, u8 fence, u32 mlx5_opcode)
 {
        u8 opmod = 0;
 
@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
                                             mlx5_opcode | ((u32)opmod << 24));
        ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
        ctrl->fm_ce_se |= fence;
-       qp->fm_cache = next_fence;
        if (unlikely(qp->wq_sig))
                ctrl->signature = wq_sig(ctrl);
 
@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        goto out;
                }
 
-               fence = qp->fm_cache;
                num_sge = wr->num_sge;
                if (unlikely(num_sge > qp->sq.max_gs)) {
                        mlx5_ib_warn(dev, "\n");
@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        goto out;
                }
 
+               if (wr->opcode == IB_WR_LOCAL_INV ||
+                   wr->opcode == IB_WR_REG_MR) {
+                       fence = dev->umr_fence;
+                       next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+               } else if (wr->send_flags & IB_SEND_FENCE) {
+                       if (qp->next_fence)
+                               fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+                       else
+                               fence = MLX5_FENCE_MODE_FENCE;
+               } else {
+                       fence = qp->next_fence;
+               }
+
                switch (ibqp->qp_type) {
                case IB_QPT_XRC_INI:
                        xrc = seg;
@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                goto out;
 
                        case IB_WR_LOCAL_INV:
-                               next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
                                ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
                                set_linv_wr(qp, &seg, &size);
@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                break;
 
                        case IB_WR_REG_MR:
-                               next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                qp->sq.wr_data[idx] = IB_WR_REG_MR;
                                ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
                                err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-                                          nreq, get_fence(fence, wr),
-                                          next_fence, MLX5_OPCODE_UMR);
+                               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                                          fence, MLX5_OPCODE_UMR);
                                /*
                                 * SET_PSV WQEs are not signaled and solicited
                                 * on error
@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-                                          nreq, get_fence(fence, wr),
-                                          next_fence, MLX5_OPCODE_SET_PSV);
+                               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                                          fence, MLX5_OPCODE_SET_PSV);
                                err = begin_wqe(qp, &seg, &ctrl, wr,
                                                &idx, &size, nreq);
                                if (err) {
@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
                                                 mr->sig->psv_wire.psv_idx, &seg,
                                                 &size);
@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-                                          nreq, get_fence(fence, wr),
-                                          next_fence, MLX5_OPCODE_SET_PSV);
+                               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                                          fence, MLX5_OPCODE_SET_PSV);
+                               qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                num_sge = 0;
                                goto skip_psv;
 
@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                }
 
-               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
-                          get_fence(fence, wr), next_fence,
+               qp->next_fence = next_fence;
+               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
                           mlx5_ib_opcode[wr->opcode]);
 skip_psv:
                if (0)
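
With get_fence() gone, the fence for each WQE is decided once at the top of the post-send loop from three inputs: the opcode, the IB_SEND_FENCE flag, and the fence state carried over from the previous WQE in qp->next_fence, with UMR-class work (LOCAL_INV, REG_MR) using the device-wide dev->umr_fence derived from the umr_fence capability. Restated compactly from the hunks above:

	if (wr->opcode == IB_WR_LOCAL_INV || wr->opcode == IB_WR_REG_MR) {
		fence = dev->umr_fence;		/* capability-dependent */
		next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
	} else if (wr->send_flags & IB_SEND_FENCE) {
		fence = qp->next_fence ? MLX5_FENCE_MODE_SMALL_AND_FENCE
				       : MLX5_FENCE_MODE_FENCE;
	} else {
		fence = qp->next_fence;		/* inherit pending fence */
	}
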
index fb983df7c157b660239983e00c85da5627863dec..30b256a2c54ec42dd97b29ff0f0cb15be6d44510 100644 (file)
@@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
                ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
        }
        ctrl_ird |= IETF_PEER_TO_PEER;
-       ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
        switch (mpa_key) {
        case MPA_KEY_REQUEST:
@@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
                        type = NES_CM_EVENT_CONNECTED;
                        cm_node->state = NES_CM_STATE_TSA;
                }
-
+               send_ack(cm_node, NULL);
                break;
        default:
                WARN_ON(1);
index 3d7705cec7705fcf334a96353e6830b554176500..d86dbe814d98fbe00adf22acb6d1ee658efc5ef5 100644 (file)
@@ -270,11 +270,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
                return rc;
        }
 
-       vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
-       if (vlan_id < VLAN_CFI_MASK)
-               has_vlan = true;
-       if (sgid_attr.ndev)
+       if (sgid_attr.ndev) {
+               vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+               if (vlan_id < VLAN_CFI_MASK)
+                       has_vlan = true;
+
                dev_put(sgid_attr.ndev);
+       }
 
        if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
                DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
index fc8b88514da52bc380ce51066e0a2665ff18a2be..4ddbcac5eabe6834f90ecac5885b75495493264a 100644 (file)
@@ -1956,8 +1956,10 @@ send_last:
                ret = qib_get_rwqe(qp, 1);
                if (ret < 0)
                        goto nack_op_err;
-               if (!ret)
+               if (!ret) {
+                       rvt_put_ss(&qp->r_sge);
                        goto rnr_nak;
+               }
                wc.ex.imm_data = ohdr->u.rc.imm_data;
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
index 874b24366e4dd744cc9ddf1c4b561a5a050641d1..7871379342f48fa77b2e6e8279ca774b4c49ad2f 100644 (file)
@@ -178,7 +178,7 @@ static inline int ib_speed_enum_to_int(int speed)
 static int ipoib_get_link_ksettings(struct net_device *netdev,
                                    struct ethtool_link_ksettings *cmd)
 {
-       struct ipoib_dev_priv *priv = netdev_priv(netdev);
+       struct ipoib_dev_priv *priv = ipoib_priv(netdev);
        struct ib_port_attr attr;
        int ret, speed, width;
 
index 2869d1adb1decdab20a19145536fc978344aba78..a115c0b7a310ed630c1c32ffd9e2c17574358f7c 100644 (file)
@@ -1590,7 +1590,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
        wait_for_completion(&priv->ntbl.deleted);
 }
 
-void ipoib_dev_uninit_default(struct net_device *dev)
+static void ipoib_dev_uninit_default(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
 
index def723a5df29fa72342ed5e52ec7fa35fa54375c..2354c742caa12d69ef761ecbc7ca8af311bb8aa5 100644 (file)
@@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
        ch->path.sgid = target->sgid;
        ch->path.dgid = target->orig_dgid;
        ch->path.pkey = target->pkey;
-       sa_path_set_service_id(&ch->path, target->service_id);
+       ch->path.service_id = target->service_id;
 
        return 0;
 }
@@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
        return 0;
 
 err_qp:
-       srp_destroy_qp(ch, qp);
+       ib_destroy_qp(qp);
 
 err_send_cq:
        ib_free_cq(send_cq);
index 485900f953e088ca91d0b1b538be6f6cc3ad6794..abc266e40e1710e034d05077ebbe7dc33ce42903 100644 (file)
@@ -213,7 +213,7 @@ static int tm2_touchkey_probe(struct i2c_client *client,
        /* led device */
        touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME;
        touchkey->led_dev.brightness = LED_FULL;
-       touchkey->led_dev.max_brightness = LED_FULL;
+       touchkey->led_dev.max_brightness = LED_ON;
        touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set;
 
        error = devm_led_classdev_register(&client->dev, &touchkey->led_dev);
index f11807db69792a1c19661b6921caceee247fbaf5..400869e61a0663be592e723f8f9de4f3596aaf3d 100644 (file)
@@ -256,6 +256,42 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
        return 0;
 }
 
+#ifdef CONFIG_ACPI
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
+                                            struct platform_device *pdev)
+{
+       unsigned long long hrv = 0;
+       acpi_status status;
+
+       if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) &&
+           axp20x_pek->axp20x->variant == AXP288_ID) {
+               status = acpi_evaluate_integer(ACPI_HANDLE(pdev->dev.parent),
+                                              "_HRV", NULL, &hrv);
+               if (ACPI_FAILURE(status))
+                       dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
+
+               /*
+                * On Cherry Trail platforms (hrv == 3), do not register the
+                * input device if there is an "INTCFD9" or "ACPI0011" gpio
+                * button ACPI device, as that handles the power button too,
+                * and otherwise we end up reporting all presses twice.
+                */
+               if (hrv == 3 && (acpi_dev_present("INTCFD9", NULL, -1) ||
+                                acpi_dev_present("ACPI0011", NULL, -1)))
+                       return false;
+
+       }
+
+       return true;
+}
+#else
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
+                                            struct platform_device *pdev)
+{
+       return true;
+}
+#endif
+
 static int axp20x_pek_probe(struct platform_device *pdev)
 {
        struct axp20x_pek *axp20x_pek;
@@ -268,13 +304,7 @@ static int axp20x_pek_probe(struct platform_device *pdev)
 
        axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
 
-       /*
-        * Do not register the input device if there is an "INTCFD9"
-        * gpio button ACPI device, that handles the power button too,
-        * and otherwise we end up reporting all presses twice.
-        */
-       if (!acpi_dev_found("INTCFD9") ||
-           !IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY)) {
+       if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
                error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
                if (error)
                        return error;
index a679e56c44cd49ddea4361aebdb97d4fe7f1e12e..f431da07f861e50fa86574954f7e4b0c2c1e57c3 100644 (file)
@@ -554,32 +554,34 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
                                     struct completion *completion)
 {
        struct device *dev = &client->dev;
-       long ret;
        int error;
        int len;
-       u8 buffer[ETP_I2C_INF_LENGTH];
+       u8 buffer[ETP_I2C_REPORT_LEN];
+
+       len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
+       if (len != ETP_I2C_REPORT_LEN) {
+               error = len < 0 ? len : -EIO;
+               dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
+                       error, len);
+       }
 
        reinit_completion(completion);
        enable_irq(client->irq);
 
        error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
-       if (!error)
-               ret = wait_for_completion_interruptible_timeout(completion,
-                                                       msecs_to_jiffies(300));
-       disable_irq(client->irq);
-
        if (error) {
                dev_err(dev, "device reset failed: %d\n", error);
-               return error;
-       } else if (ret == 0) {
+       } else if (!wait_for_completion_timeout(completion,
+                                               msecs_to_jiffies(300))) {
                dev_err(dev, "timeout waiting for device reset\n");
-               return -ETIMEDOUT;
-       } else if (ret < 0) {
-               error = ret;
-               dev_err(dev, "error waiting for device reset: %d\n", error);
-               return error;
+               error = -ETIMEDOUT;
        }
 
+       disable_irq(client->irq);
+
+       if (error)
+               return error;
+
        len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH);
        if (len != ETP_I2C_INF_LENGTH) {
                error = len < 0 ? len : -EIO;
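
The rework above swaps the interruptible wait (which can abort with -ERESTARTSYS if a signal lands mid-flash) for wait_for_completion_timeout(), and balances disable_irq() on every path before acting on the error. A sketch of the resulting flow, where trigger_reset() is a hypothetical stand-in for elan_i2c_write_cmd():

    reinit_completion(&reset_done);
    enable_irq(client->irq);

    error = trigger_reset(client);
    if (error)
            dev_err(dev, "device reset failed: %d\n", error);
    else if (!wait_for_completion_timeout(&reset_done,
                                          msecs_to_jiffies(300)))
            error = -ETIMEDOUT;     /* returns 0 jiffies on timeout */

    disable_irq(client->irq);       /* always paired with enable_irq() */
    if (error)
            return error;
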
index 131df9d3660f0e4866b9ffc849ca0d6ce1d4e906..16c30460ef041b13686db7f6efe36860b725a4bd 100644 (file)
@@ -176,6 +176,12 @@ static const char * const smbus_pnp_ids[] = {
        NULL
 };
 
+static const char * const forcepad_pnp_ids[] = {
+       "SYN300D",
+       "SYN3014",
+       NULL
+};
+
 /*
  * Send a command to the synaptics touchpad by special commands
  */
@@ -397,6 +403,8 @@ static int synaptics_query_hardware(struct psmouse *psmouse,
 {
        int error;
 
+       memset(info, 0, sizeof(*info));
+
        error = synaptics_identify(psmouse, info);
        if (error)
                return error;
@@ -480,13 +488,6 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
        { }
 };
 
-/* This list has been kindly provided by Synaptics. */
-static const char * const forcepad_pnp_ids[] = {
-       "SYN300D",
-       "SYN3014",
-       NULL
-};
-
 /*****************************************************************************
  *     Synaptics communications functions
  ****************************************************************************/
@@ -1687,7 +1688,8 @@ enum {
        SYNAPTICS_INTERTOUCH_ON,
 };
 
-static int synaptics_intertouch = SYNAPTICS_INTERTOUCH_NOT_SET;
+static int synaptics_intertouch = IS_ENABLED(CONFIG_RMI4_SMB) ?
+               SYNAPTICS_INTERTOUCH_NOT_SET : SYNAPTICS_INTERTOUCH_OFF;
 module_param_named(synaptics_intertouch, synaptics_intertouch, int, 0644);
 MODULE_PARM_DESC(synaptics_intertouch, "Use a secondary bus for the Synaptics device.");
 
@@ -1737,8 +1739,16 @@ static int synaptics_setup_intertouch(struct psmouse *psmouse,
 
        if (synaptics_intertouch == SYNAPTICS_INTERTOUCH_NOT_SET) {
                if (!psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
-                   !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids))
+                   !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) {
+
+                       if (!psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids))
+                               psmouse_info(psmouse,
+                                            "Your touchpad (%s) says it can support a different bus. "
+                                            "If i2c-hid and hid-rmi are not used, you might want to try setting psmouse.synaptics_intertouch to 1 and report this to linux-input@vger.kernel.org.\n",
+                                            psmouse->ps2dev.serio->firmware_id);
+
                        return -ENXIO;
+               }
        }
 
        psmouse_info(psmouse, "Trying to set up SMBus access\n");
@@ -1810,6 +1820,15 @@ int synaptics_init(struct psmouse *psmouse)
        }
 
        if (SYN_CAP_INTERTOUCH(info.ext_cap_0c)) {
+               if ((!IS_ENABLED(CONFIG_RMI4_SMB) ||
+                    !IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS)) &&
+                   /* Forcepads need F21, which is not ready */
+                   !psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids)) {
+                       psmouse_warn(psmouse,
+                                    "The touchpad can support a better bus than the too old PS/2 protocol. "
+                                    "Make sure MOUSE_PS2_SYNAPTICS_SMBUS and RMI4_SMB are enabled to get a better touchpad experience.\n");
+               }
+
                error = synaptics_setup_intertouch(psmouse, &info, true);
                if (!error)
                        return PSMOUSE_SYNAPTICS_SMBUS;
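
The new module-parameter default above is picked at compile time: IS_ENABLED() is true when an option is built in or modular, so intertouch auto-detection only defaults on when an SMBus backend can actually exist. The same idiom in isolation (the foo_ names are illustrative):

    #include <linux/kconfig.h>
    #include <linux/module.h>

    enum { FOO_OFF, FOO_AUTO };

    /* Default depends on whether the backend is built (=y or =m). */
    static int foo_mode = IS_ENABLED(CONFIG_FOO_BACKEND) ? FOO_AUTO : FOO_OFF;
    module_param(foo_mode, int, 0644);
    MODULE_PARM_DESC(foo_mode, "0=off, 1=auto-detect");
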
index 2302aef2b2d403d47f8384c0127ad91104788ef5..dd042a9b0aaacc208056376b27a5fd7bf5755f6c 100644 (file)
@@ -350,6 +350,7 @@ static bool mxt_object_readable(unsigned int type)
        case MXT_TOUCH_KEYARRAY_T15:
        case MXT_TOUCH_PROXIMITY_T23:
        case MXT_TOUCH_PROXKEY_T52:
+       case MXT_TOUCH_MULTITOUCHSCREEN_T100:
        case MXT_PROCI_GRIPFACE_T20:
        case MXT_PROCG_NOISE_T22:
        case MXT_PROCI_ONETOUCH_T24:
index 8cf8d8d5d4ef4f82b45cc5ecbb959c92be95b699..f872817e81e46ba6d962a5edc207302e9bd25f3f 100644 (file)
@@ -471,7 +471,7 @@ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
 static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
                M09_REGISTER_OFFSET, 0, 31);
 static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
-               M09_REGISTER_THRESHOLD, 20, 80);
+               M09_REGISTER_THRESHOLD, 0, 80);
 static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
                NO_REGISTER, 3, 14);
 
index 813dd68a5c82478f4636652fe6e5dec05ec1fd37..0dbcf105f7db348773592f54d4b85beb9b7ba4a3 100644 (file)
@@ -526,6 +526,7 @@ static int __maybe_unused silead_ts_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
 
+       disable_irq(client->irq);
        silead_ts_set_power(client, SILEAD_POWER_OFF);
        return 0;
 }
@@ -551,6 +552,8 @@ static int __maybe_unused silead_ts_resume(struct device *dev)
                return -ENODEV;
        }
 
+       enable_irq(client->irq);
+
        return 0;
 }
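
The silead fix keeps the touch IRQ masked while the controller is powered down; the disable_irq() in suspend must be mirrored by enable_irq() in resume or the line stays dead. Roughly, with a hypothetical foo_ts_power() helper:

    static int __maybe_unused foo_ts_suspend(struct device *dev)
    {
            struct i2c_client *client = to_i2c_client(dev);

            disable_irq(client->irq);       /* no interrupts while off */
            foo_ts_power(client, false);
            return 0;
    }

    static int __maybe_unused foo_ts_resume(struct device *dev)
    {
            struct i2c_client *client = to_i2c_client(dev);

            foo_ts_power(client, true);
            enable_irq(client->irq);        /* pairs with suspend */
            return 0;
    }
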
 
index d07dd5196ffca59c11532051fb88e2ecdc7326c9..8aa158a091806fd7d3107ed56b8550198be7b9a0 100644 (file)
@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
                       id);
                return NULL;
        } else {
-               rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
+               rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
                if (!rs)
                        return NULL;
                rs->state = CCPResetIdle;
index 8b7faea2ddf88b718c252dc049e5d1b8b5e8357b..422dced7c90ac26dcf0d366fedb32ab9edf44207 100644 (file)
@@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
                if (sk->sk_state != MISDN_BOUND)
                        continue;
                if (!cskb)
-                       cskb = skb_copy(skb, GFP_KERNEL);
+                       cskb = skb_copy(skb, GFP_ATOMIC);
                if (!cskb) {
                        printk(KERN_WARNING "%s no skb\n", __func__);
                        break;
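
Both ISDN fixes replace GFP_KERNEL with GFP_ATOMIC because the allocations run in contexts that must not sleep; GFP_KERNEL may block on memory reclaim, which is illegal under a spinlock or in softirq context. The general shape, as a fragment:

    spin_lock_irqsave(&lock, flags);

    /*
     * Must not sleep here, so no GFP_KERNEL: use GFP_ATOMIC and be
     * prepared for it to fail under memory pressure.
     */
    obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
    if (!obj) {
            spin_unlock_irqrestore(&lock, flags);
            return -ENOMEM;
    }

    spin_unlock_irqrestore(&lock, flags);
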
index 78a7ce816a4792c7b1ddc101cece8c265084e630..9a873118ea5fcf4ff7adae18e03b3a351936df16 100644 (file)
@@ -285,7 +285,7 @@ static int pca955x_probe(struct i2c_client *client,
                        "slave address 0x%02x\n",
                        client->name, chip->bits, client->addr);
 
-       if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -EIO;
 
        if (pdata) {
index bf7419a56454e3834ea2c2034d3170591f1ad97f..f4eace5ea184095eb0c170c4f3f1647f72b8c537 100644 (file)
@@ -485,10 +485,10 @@ void bitmap_print_sb(struct bitmap *bitmap)
        pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
        pr_debug("       version: %d\n", le32_to_cpu(sb->version));
        pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
-                *(__u32 *)(sb->uuid+0),
-                *(__u32 *)(sb->uuid+4),
-                *(__u32 *)(sb->uuid+8),
-                *(__u32 *)(sb->uuid+12));
+                le32_to_cpu(*(__u32 *)(sb->uuid+0)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+4)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+8)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+12)));
        pr_debug("        events: %llu\n",
                 (unsigned long long) le64_to_cpu(sb->events));
        pr_debug("events cleared: %llu\n",
index cd8139593ccd50655a2329460cc8de9d175eac86..840c1496b2b138ef504bde4c441b1082df183473 100644 (file)
@@ -1334,7 +1334,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = REQ_PREFLUSH,
+               .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
index c7f7c8d7657670850adedceb505538e1b9cdb2ce..7910bfe50da4469c44b571363cc6696f74f5fa42 100644 (file)
@@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
                        for (i = 0; i < commit_sections; i++)
                                rw_section_mac(ic, commit_start + i, true);
                }
-               rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
+               rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
+                          commit_sections, &io_comp);
        } else {
                unsigned to_end;
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
@@ -2374,21 +2375,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
        blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
 }
 
-/* FIXME: use new kvmalloc */
-static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
-{
-       void *ptr = NULL;
-
-       if (size <= PAGE_SIZE)
-               ptr = kmalloc(size, GFP_KERNEL | gfp);
-       if (!ptr && size <= KMALLOC_MAX_SIZE)
-               ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
-       if (!ptr)
-               ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
-
-       return ptr;
-}
-
 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
 {
        unsigned i;
@@ -2407,7 +2393,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
        struct page_list *pl;
        unsigned i;
 
-       pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
+       pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
        if (!pl)
                return NULL;
 
@@ -2437,7 +2423,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
        struct scatterlist **sl;
        unsigned i;
 
-       sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
+       sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
        if (!sl)
                return NULL;
 
@@ -2453,7 +2439,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
 
                n_pages = (end_index - start_index + 1);
 
-               s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
+               s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
                if (!s) {
                        dm_integrity_free_journal_scatterlist(ic, sl);
                        return NULL;
@@ -2617,7 +2603,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                goto bad;
                        }
 
-                       sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
+                       sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
                        if (!sg) {
                                *error = "Unable to allocate sg list";
                                r = -ENOMEM;
@@ -2673,7 +2659,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                r = -ENOMEM;
                                goto bad;
                        }
-                       ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
+                       ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO);
                        if (!ic->sk_requests) {
                                *error = "Unable to allocate sk requests";
                                r = -ENOMEM;
@@ -2740,7 +2726,7 @@ retest_commit_id:
                r = -ENOMEM;
                goto bad;
        }
-       ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
+       ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
        if (!ic->journal_tree) {
                *error = "Could not allocate memory for journal tree";
                r = -ENOMEM;
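
These hunks retire the driver-local kmalloc/vmalloc fallback (the deleted FIXME) in favour of kvmalloc(), which landed in 4.12 and does the same probing centrally; kvfree() releases either backing store. Sketch:

    #include <linux/mm.h>

    buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);  /* kmalloc, then vmalloc */
    if (!buf)
            return -ENOMEM;
    /* ... */
    kvfree(buf);    /* correct for both kmalloc()- and vmalloc()-backed memory */
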
index 0555b4410e0598a6096642f10978ad6798bc5f98..41852ae287a58c29e675dd4b794f1670f9dc53e8 100644 (file)
@@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
        }
 
        /*
-        * Try to avoid low memory issues when a device is suspended.
+        * Use __GFP_HIGH to avoid low memory issues when a device is
+        * suspended and the ioctl is needed to resume it.
         * Use kmalloc() rather than vmalloc() when we can.
         */
        dmi = NULL;
        noio_flag = memalloc_noio_save();
-       dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL);
+       dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
        memalloc_noio_restore(noio_flag);
 
        if (!dmi) {
index a95cbb80fb34444144bad346b3e769c625e8c788..e61c45047c25a9ba2683c313fbc2151c9051b178 100644 (file)
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = REQ_PREFLUSH,
+               .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = ms->io_client,
index b93476c3ba3f9767fb133fed977e7a888cc0698e..c5534d294773fc0267a1b4c7438a3316e74d417a 100644 (file)
@@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        /*
         * Commit exceptions to disk.
         */
-       if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
+       if (ps->valid && area_io(ps, REQ_OP_WRITE,
+                                REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
                ps->valid = 0;
 
        /*
index 97de961a3bfc80d11497c5ac2558ce7ad7a57a7e..1ec9b2c51c076d99ba6003f90eae608d9c9e35af 100644 (file)
@@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
                return r;
        }
 
-       if (likely(v->version >= 1))
+       if (likely(v->salt_size && (v->version >= 1)))
                r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
        return r;
@@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
 {
        int r;
 
-       if (unlikely(!v->version)) {
+       if (unlikely(v->salt_size && (!v->version))) {
                r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
                if (r < 0) {
index 6ef9500226c0c7d789ed78e6876195f73ef9d6b7..37ccd73c79ecf2eeb4f33b5bc597f88ca5750d4b 100644 (file)
@@ -1657,7 +1657,7 @@ static struct mapped_device *alloc_dev(int minor)
 
        bio_init(&md->flush_bio, NULL, 0);
        md->flush_bio.bi_bdev = md->bdev;
-       md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+       md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
        dm_stats_init(&md->stats);
 
index 7299ce2f08a810555a0407a423a512a0f59f190c..03082e17c65cc87af2a44a8020bda6cbdb8b0262 100644 (file)
@@ -1311,8 +1311,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
        cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
        lock_comm(cinfo, 1);
        ret = __sendmsg(cinfo, &cmsg);
-       if (ret)
+       if (ret) {
+               unlock_comm(cinfo);
                return ret;
+       }
        cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
        ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
        cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
index 10367ffe92e3e37704f5e32793ea97175c8b15e6..212a6777ff3172dd9e20401dd7bf87ad5d2f7468 100644 (file)
@@ -765,7 +765,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
            test_bit(FailFast, &rdev->flags) &&
            !test_bit(LastDev, &rdev->flags))
                ff = MD_FAILFAST;
-       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
 
        atomic_inc(&mddev->pending_writes);
        submit_bio(bio);
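
The REQ_SYNC additions across md and dm exist because the block layer strips REQ_PREFLUSH/REQ_FUA on devices without a volatile write cache; without an explicit REQ_SYNC the resulting write is then treated as asynchronous background I/O and can be throttled. Composing the flag word:

    /*
     * Metadata write: flush caches first, force media access (FUA), and
     * keep it synchronous even if PREFLUSH/FUA get stripped by the queue.
     */
    bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
    submit_bio(bio);
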
index 4c00bc248287e4ab89b492225e0d054973725549..0a7af8b0a80a031a99a7af1742e2d64e6df0d106 100644 (file)
@@ -1782,7 +1782,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
        mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
                                             mb, PAGE_SIZE));
        if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-                         REQ_FUA, false)) {
+                         REQ_SYNC | REQ_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
@@ -2388,7 +2388,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
                                                     mb, PAGE_SIZE));
                sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
-                            REQ_OP_WRITE, REQ_FUA, false);
+                            REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
                sh->log_start = ctx->pos;
                list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
                atomic_inc(&log->stripe_in_journal_count);
index 5d25bebf3328e4967334465916aca3e3c750e447..ccce92e68d7fa5d8258bb7f2ca2bfa1bcd545709 100644 (file)
@@ -907,8 +907,8 @@ static int ppl_write_empty_header(struct ppl_log *log)
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
 
        if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
-                         PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0,
-                         false)) {
+                         PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
+                         REQ_FUA, 0, false)) {
                md_error(rdev->mddev, rdev);
                ret = -EIO;
        }
index 9c4f7659f8b1337c99cfd0ab5070012e3f658849..722064689e822f3b876411f076921e244abbec2f 100644 (file)
@@ -4085,10 +4085,15 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
                        set_bit(STRIPE_INSYNC, &sh->state);
                else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                sh->check_state = check_state_compute_run;
                                set_bit(STRIPE_COMPUTE_RUN, &sh->state);
                                set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
@@ -4237,10 +4242,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
                        }
                } else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                int *target = &sh->ops.target;
 
                                sh->ops.target = -1;
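
Both parity-check paths now report where the mismatch was found; pr_warn_ratelimited() is used so a badly mismatched array cannot flood the log during a check pass. In isolation (names hypothetical):

    #include <linux/printk.h>

    /* At most a bounded burst of messages per ratelimit interval. */
    pr_warn_ratelimited("%s: mismatch sector in range %llu-%llu\n",
                        name,
                        (unsigned long long)sector,
                        (unsigned long long)(sector + nr_sectors));
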
index b72edd27f880fbe99641cd36b44005c6ad9252c6..55d9c2b82b7eab11268d88f7d8e5cf586f910985 100644 (file)
@@ -2,6 +2,12 @@
 # Multimedia device configuration
 #
 
+config CEC_CORE
+       tristate
+
+config CEC_NOTIFIER
+       bool
+
 menuconfig MEDIA_SUPPORT
        tristate "Multimedia support"
        depends on HAS_IOMEM
index 523fea3648ad71749009fc9f7d0f543d36734128..044503aa8801744785da9c637bda4c020bfac766 100644 (file)
@@ -4,8 +4,6 @@
 
 media-objs     := media-device.o media-devnode.o media-entity.o
 
-obj-$(CONFIG_CEC_CORE) += cec/
-
 #
 # I2C drivers should come before other drivers, otherwise they'll fail
 # when compiled as builtin drivers
@@ -26,6 +24,8 @@ obj-$(CONFIG_DVB_CORE)  += dvb-core/
 # There are both core and drivers at RC subtree - merge before drivers
 obj-y += rc/
 
+obj-$(CONFIG_CEC_CORE) += cec/
+
 #
 # Finally, merge the drivers that require the core
 #
index f944d93e3167f4e338a1e9cb607af32311136b50..4e25a950ae6f5af73c7bc7bd305a51368ef7d52b 100644 (file)
@@ -1,19 +1,5 @@
-config CEC_CORE
-       tristate
-       depends on MEDIA_CEC_SUPPORT
-       default y
-
-config MEDIA_CEC_NOTIFIER
-       bool
-
 config MEDIA_CEC_RC
        bool "HDMI CEC RC integration"
        depends on CEC_CORE && RC_CORE
        ---help---
          Pass on CEC remote control messages to the RC framework.
-
-config MEDIA_CEC_DEBUG
-       bool "HDMI CEC debugfs interface"
-       depends on CEC_CORE && DEBUG_FS
-       ---help---
-         Turns on the DebugFS interface for CEC devices.
index 402a6c62a3e8b9bb4857ac553e1b9a14c6babc16..eaf408e646697adcb8cc95b95c8425574715bdee 100644 (file)
@@ -1,6 +1,6 @@
 cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
 
-ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y)
+ifeq ($(CONFIG_CEC_NOTIFIER),y)
   cec-objs += cec-notifier.o
 endif
 
index f5fe01c9da8af17906805668b8448cd6aaf9da1e..9dfc79800c7191964557afe729603afa5cf2b5df 100644 (file)
@@ -1864,7 +1864,7 @@ void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
                WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
 }
 
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
 /*
  * Log the current state of the CEC adapter.
  * Very useful for debugging.
index f9ebff90f8ebc08b77088c8f1901d8aa7077b99f..2f87748ba4fceea377284be6a1c35b6478ca4788 100644 (file)
@@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
        put_device(&devnode->dev);
 }
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
 {
        cec_s_phys_addr(adap, pa, false);
@@ -323,7 +323,7 @@ int cec_register_adapter(struct cec_adapter *adap,
        }
 
        dev_set_drvdata(&adap->devnode.dev, adap);
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
        if (!top_cec_dir)
                return 0;
 
@@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap)
        adap->rc = NULL;
 #endif
        debugfs_remove_recursive(adap->cec_dir);
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
        if (adap->notifier)
                cec_notifier_unregister(adap->notifier);
 #endif
@@ -395,7 +395,7 @@ static int __init cec_devnode_init(void)
                return ret;
        }
 
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
        top_cec_dir = debugfs_create_dir("cec", NULL);
        if (IS_ERR_OR_NULL(top_cec_dir)) {
                pr_warn("cec: Failed to create debugfs cec dir\n");
index fd181c99ce117fc44c16ded99d50abe9a655b4f8..aaa9471c7d117eae0a00e4bbbf4ed20e3ca65b36 100644 (file)
@@ -220,7 +220,8 @@ config VIDEO_ADV7604
 
 config VIDEO_ADV7604_CEC
        bool "Enable Analog Devices ADV7604 CEC support"
-       depends on VIDEO_ADV7604 && CEC_CORE
+       depends on VIDEO_ADV7604
+       select CEC_CORE
        ---help---
          When selected the adv7604 will support the optional
          HDMI CEC feature.
@@ -240,7 +241,8 @@ config VIDEO_ADV7842
 
 config VIDEO_ADV7842_CEC
        bool "Enable Analog Devices ADV7842 CEC support"
-       depends on VIDEO_ADV7842 && CEC_CORE
+       depends on VIDEO_ADV7842
+       select CEC_CORE
        ---help---
          When selected the adv7842 will support the optional
          HDMI CEC feature.
@@ -478,7 +480,8 @@ config VIDEO_ADV7511
 
 config VIDEO_ADV7511_CEC
        bool "Enable Analog Devices ADV7511 CEC support"
-       depends on VIDEO_ADV7511 && CEC_CORE
+       depends on VIDEO_ADV7511
+       select CEC_CORE
        ---help---
          When selected the adv7511 will support the optional
          HDMI CEC feature.
index ac026ee1ca07484ffa6b0c51632d1c9958c0d3cd..041cb80a26b1ff22f049abaed44b0a62c9b91a9c 100644 (file)
@@ -501,8 +501,9 @@ if CEC_PLATFORM_DRIVERS
 
 config VIDEO_SAMSUNG_S5P_CEC
        tristate "Samsung S5P CEC driver"
-       depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
-       select MEDIA_CEC_NOTIFIER
+       depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+       select CEC_CORE
+       select CEC_NOTIFIER
        ---help---
          This is a driver for Samsung S5P HDMI CEC interface. It uses the
          generic CEC framework interface.
@@ -511,8 +512,9 @@ config VIDEO_SAMSUNG_S5P_CEC
 
 config VIDEO_STI_HDMI_CEC
        tristate "STMicroelectronics STiH4xx HDMI CEC driver"
-       depends on CEC_CORE && (ARCH_STI || COMPILE_TEST)
-       select MEDIA_CEC_NOTIFIER
+       depends on ARCH_STI || COMPILE_TEST
+       select CEC_CORE
+       select CEC_NOTIFIER
        ---help---
          This is a driver for STIH4xx HDMI CEC interface. It uses the
          generic CEC framework interface.
index 57a842ff309747382a928998d0209072476da421..b7731b18ecae1741ebee3be1bf9188659bfeb398 100644 (file)
@@ -493,10 +493,10 @@ static int vdec_h264_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_h264_if = {
-       vdec_h264_init,
-       vdec_h264_decode,
-       vdec_h264_get_param,
-       vdec_h264_deinit,
+       .init           = vdec_h264_init,
+       .decode         = vdec_h264_decode,
+       .get_param      = vdec_h264_get_param,
+       .deinit         = vdec_h264_deinit,
 };
 
 struct vdec_common_if *get_h264_dec_comm_if(void);
index 6e7a62ae0842c2e69bb65e31bb1fa80e7c9ce44c..b9fad6a488799ebc7fad8b12b6990b9c33d7c60b 100644 (file)
@@ -620,10 +620,10 @@ static void vdec_vp8_deinit(unsigned long h_vdec)
 }
 
 static struct vdec_common_if vdec_vp8_if = {
-       vdec_vp8_init,
-       vdec_vp8_decode,
-       vdec_vp8_get_param,
-       vdec_vp8_deinit,
+       .init           = vdec_vp8_init,
+       .decode         = vdec_vp8_decode,
+       .get_param      = vdec_vp8_get_param,
+       .deinit         = vdec_vp8_deinit,
 };
 
 struct vdec_common_if *get_vp8_dec_comm_if(void);
index 5539b1853f166a611ed678bc1274f55e48f1347c..1daee1207469b3ea9e740676aa80765a4280c118 100644 (file)
@@ -979,10 +979,10 @@ static int vdec_vp9_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_vp9_if = {
-       vdec_vp9_init,
-       vdec_vp9_decode,
-       vdec_vp9_get_param,
-       vdec_vp9_deinit,
+       .init           = vdec_vp9_init,
+       .decode         = vdec_vp9_decode,
+       .get_param      = vdec_vp9_get_param,
+       .deinit         = vdec_vp9_deinit,
 };
 
 struct vdec_common_if *get_vp9_dec_comm_if(void);
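
All three mediatek decoder ops tables switch from positional to designated initializers, which bind by field name and therefore survive reordering or insertion of struct members. In plain C:

    struct ops {
            int  (*init)(void);
            int  (*decode)(const void *buf);
            void (*deinit)(void);
    };

    static int  my_init(void)              { return 0; }
    static int  my_decode(const void *buf) { (void)buf; return 0; }
    static void my_deinit(void)            { }

    /* Safe even if 'struct ops' gains or reorders members later. */
    static const struct ops vdec_ops = {
            .init   = my_init,
            .decode = my_decode,
            .deinit = my_deinit,
    };
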
index b36ac19dc6e48d60afbc4053fb848d9439dfcd5d..154de92dd809e74ff3d7539787856fef631229fc 100644 (file)
@@ -26,7 +26,8 @@ config VIDEO_VIVID
 
 config VIDEO_VIVID_CEC
        bool "Enable CEC emulation support"
-       depends on VIDEO_VIVID && CEC_CORE
+       depends on VIDEO_VIVID
+       select CEC_CORE
        ---help---
          When selected the vivid module will emulate the optional
          HDMI CEC feature.
index 90f66dc7c0d74dbed7cf370f652546eadc563bf8..a2fc1a1d58b0e317539a1a679e32f96c12d3b00f 100644 (file)
@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
  */
 void ir_raw_event_handle(struct rc_dev *dev)
 {
-       if (!dev->raw)
+       if (!dev->raw || !dev->raw->thread)
                return;
 
        wake_up_process(dev->raw->thread);
@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev)
 {
        int rc;
        struct ir_raw_handler *handler;
+       struct task_struct *thread;
 
        if (!dev)
                return -EINVAL;
@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev)
         * because the event is coming from userspace
         */
        if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
-               dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
-                                              "rc%u", dev->minor);
+               thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
+                                    dev->minor);
 
-               if (IS_ERR(dev->raw->thread)) {
-                       rc = PTR_ERR(dev->raw->thread);
+               if (IS_ERR(thread)) {
+                       rc = PTR_ERR(thread);
                        goto out;
                }
+
+               dev->raw->thread = thread;
        }
 
        mutex_lock(&ir_raw_handler_lock);
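
The rc-core fix closes a window in which dev->raw->thread could briefly hold an ERR_PTR from kthread_run() while ir_raw_event_handle() was already allowed to wake it. The pattern, with worker_fn and friends as hypothetical names: build in a local, publish only once valid, and have readers check for NULL.

    struct task_struct *thread;

    thread = kthread_run(worker_fn, data, "rc%u", minor);
    if (IS_ERR(thread))
            return PTR_ERR(thread);

    dev->raw->thread = thread;      /* readers only ever see NULL or valid */
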
index 8937f3986a01f1c89e3fcc5b6825cb6b754ddf20..18ead44824ba2bd02d27480e0f04ac69f39182fd 100644 (file)
@@ -1,6 +1,7 @@
 config USB_PULSE8_CEC
        tristate "Pulse Eight HDMI CEC"
-       depends on USB_ACM && CEC_CORE
+       depends on USB_ACM
+       select CEC_CORE
        select SERIO
        select SERIO_SERPORT
        ---help---
index 3eb86607efb8f627566af1ff374053d7f6b980ec..030ef01b1ff04137ede4a84ddf0357afe943343d 100644 (file)
@@ -1,6 +1,7 @@
 config USB_RAINSHADOW_CEC
        tristate "RainShadow Tech HDMI CEC"
-       depends on USB_ACM && CEC_CORE
+       depends on USB_ACM
+       select CEC_CORE
        select SERIO
        select SERIO_SERPORT
        ---help---
index 541ca543f71f4efe7e29e7c22bce114c2b18fc3d..71bd68548c9c87d3359a458efe9069c59a81e81a 100644 (file)
@@ -119,7 +119,7 @@ static void rain_irq_work_handler(struct work_struct *work)
 
        while (true) {
                unsigned long flags;
-               bool exit_loop;
+               bool exit_loop = false;
                char data;
 
                spin_lock_irqsave(&rain->buf_lock, flags);
index 35910f945bfad02823f7146c0746feb90aa2cda9..99e644cda4d13db301b713a5752788c0f646dfa1 100644 (file)
@@ -581,7 +581,7 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        return of_platform_populate(np, NULL, NULL, dev);
 }
 
-static int atmel_ebi_resume(struct device *dev)
+static __maybe_unused int atmel_ebi_resume(struct device *dev)
 {
        struct atmel_ebi *ebi = dev_get_drvdata(dev);
        struct atmel_ebi_dev *ebid;
index 17b433f1ce23b7deeb2e3f35139d3bef57ef20d0..0761271d68c5613b23152c03522f6a388950ea86 100644 (file)
@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
        /* Do this outside the status_mutex to avoid a circular dependency with
         * the locking in cxl_mmap_fault() */
-       if (copy_from_user(&work, uwork,
-                          sizeof(struct cxl_ioctl_start_work))) {
-               rc = -EFAULT;
-               goto out;
-       }
+       if (copy_from_user(&work, uwork, sizeof(work)))
+               return -EFAULT;
 
        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
index 871a2f09c71845b2803bab920618c36de634e4fc..8d6ea9712dbd1830fcdc5d6eecda3d28d69a9376 100644 (file)
@@ -1302,13 +1302,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
 
 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-       if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+       if (adapter->native->err_virq == 0 ||
+           adapter->native->err_virq !=
+           irq_find_mapping(NULL, adapter->native->err_hwirq))
                return;
 
        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->native->err_virq, adapter);
        cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
        kfree(adapter->irq_name);
+       adapter->native->err_virq = 0;
 }
 
 int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1349,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
 
 void cxl_native_release_serr_irq(struct cxl_afu *afu)
 {
-       if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+       if (afu->serr_virq == 0 ||
+           afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
                return;
 
        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
+       afu->serr_virq = 0;
 }
 
 int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1380,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 
 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-       if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+       if (afu->native->psl_virq == 0 ||
+           afu->native->psl_virq !=
+           irq_find_mapping(NULL, afu->native->psl_hwirq))
                return;
 
        cxl_unmap_irq(afu->native->psl_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
        kfree(afu->psl_irq_name);
+       afu->native->psl_virq = 0;
 }
 
 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
index c862cd4583cc93694747e191f5e3537e5767bfa5..b8069eec18cb44ef335535789f0e2a61ffaf4bd1 100644 (file)
@@ -309,6 +309,9 @@ static inline enum xp_retval
 xpc_send(short partid, int ch_number, u32 flags, void *payload,
         u16 payload_size)
 {
+       if (!xpc_interface.send)
+               return xpNotLoaded;
+
        return xpc_interface.send(partid, ch_number, flags, payload,
                                  payload_size);
 }
@@ -317,6 +320,9 @@ static inline enum xp_retval
 xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
                u16 payload_size, xpc_notify_func func, void *key)
 {
+       if (!xpc_interface.send_notify)
+               return xpNotLoaded;
+
        return xpc_interface.send_notify(partid, ch_number, flags, payload,
                                         payload_size, func, key);
 }
@@ -324,12 +330,16 @@ xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
 static inline void
 xpc_received(short partid, int ch_number, void *payload)
 {
-       return xpc_interface.received(partid, ch_number, payload);
+       if (xpc_interface.received)
+               xpc_interface.received(partid, ch_number, payload);
 }
 
 static inline enum xp_retval
 xpc_partid_to_nasids(short partid, void *nasids)
 {
+       if (!xpc_interface.partid_to_nasids)
+               return xpNotLoaded;
+
        return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
index 01be66d02ca8ce52c84b809fa55a7aeb6b219bc2..6d7f557fd1c1a1e885eb3dad64886b3e0afe32fe 100644 (file)
@@ -69,23 +69,9 @@ struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
 EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
- * Initialize the XPC interface to indicate that XPC isn't loaded.
+ * Initialize the XPC interface to NULL to indicate that XPC isn't loaded.
  */
-static enum xp_retval
-xpc_notloaded(void)
-{
-       return xpNotLoaded;
-}
-
-struct xpc_interface xpc_interface = {
-       (void (*)(int))xpc_notloaded,
-       (void (*)(int))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
-                          void *))xpc_notloaded,
-       (void (*)(short, int, void *))xpc_notloaded,
-       (enum xp_retval(*)(short, void *))xpc_notloaded
-};
+struct xpc_interface xpc_interface = { };
 EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
@@ -115,17 +101,7 @@ EXPORT_SYMBOL_GPL(xpc_set_interface);
 void
 xpc_clear_interface(void)
 {
-       xpc_interface.connect = (void (*)(int))xpc_notloaded;
-       xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
-       xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
-           xpc_notloaded;
-       xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
-                                                      u16, xpc_notify_func,
-                                                      void *))xpc_notloaded;
-       xpc_interface.received = (void (*)(short, int, void *))
-           xpc_notloaded;
-       xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
-           xpc_notloaded;
+       memset(&xpc_interface, 0, sizeof(xpc_interface));
 }
 EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
@@ -188,7 +164,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
        mutex_unlock(&registration->mutex);
 
-       xpc_interface.connect(ch_number);
+       if (xpc_interface.connect)
+               xpc_interface.connect(ch_number);
 
        return xpSuccess;
 }
@@ -237,7 +214,8 @@ xpc_disconnect(int ch_number)
        registration->assigned_limit = 0;
        registration->idle_limit = 0;
 
-       xpc_interface.disconnect(ch_number);
+       if (xpc_interface.disconnect)
+               xpc_interface.disconnect(ch_number);
 
        mutex_unlock(&registration->mutex);
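
Instead of pre-loading xpc_interface with xpc_notloaded() cast to several incompatible function-pointer types (undefined behaviour when called), the struct is now simply zeroed and each wrapper checks its pointer. Reduced to its core:

    #include <linux/errno.h>

    struct xp_iface {
            int (*send)(int ch, const void *payload, size_t len);
    };

    static struct xp_iface iface;           /* zeroed == "not loaded" */

    static int xp_send(int ch, const void *payload, size_t len)
    {
            if (!iface.send)
                    return -ENODEV;         /* xpNotLoaded in the driver */
            return iface.send(ch, payload, len);
    }
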
 
index 1304160de16828f402dad6ff1f970af70764c3b2..13ef162cf066a63363106e40a513f3317205d10d 100644 (file)
@@ -27,6 +27,7 @@ struct mmc_pwrseq_simple {
        struct mmc_pwrseq pwrseq;
        bool clk_enabled;
        u32 post_power_on_delay_ms;
+       u32 power_off_delay_us;
        struct clk *ext_clk;
        struct gpio_descs *reset_gpios;
 };
@@ -78,6 +79,10 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
 
        mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
 
+       if (pwrseq->power_off_delay_us)
+               usleep_range(pwrseq->power_off_delay_us,
+                       2 * pwrseq->power_off_delay_us);
+
        if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) {
                clk_disable_unprepare(pwrseq->ext_clk);
                pwrseq->clk_enabled = false;
@@ -119,6 +124,8 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
 
        device_property_read_u32(dev, "post-power-on-delay-ms",
                                 &pwrseq->post_power_on_delay_ms);
+       device_property_read_u32(dev, "power-off-delay-us",
+                                &pwrseq->power_off_delay_us);
 
        pwrseq->pwrseq.dev = dev;
        pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
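
The new power-off-delay-us property lets boards keep the reset GPIO asserted long enough for the card to actually lose power before the clock is gated. usleep_range() needs a min/max window; the driver, like much kernel code, uses [t, 2t]:

    u32 delay_us = 0;

    device_property_read_u32(dev, "power-off-delay-us", &delay_us);

    if (delay_us)
            usleep_range(delay_us, 2 * delay_us);   /* min, max in us */
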
index 772d0900026d0efbd6e59911f93ef9cc8930b38a..951d2cdd7888b0d68f9994298572759162277682 100644 (file)
@@ -108,7 +108,7 @@ static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
 static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
 {
        writeq(val, host->base + MIO_EMM_INT(host));
-       if (!host->dma_active || (host->dma_active && !host->has_ciu3))
+       if (!host->has_ciu3)
                writeq(val, host->base + MIO_EMM_INT_EN(host));
 }
 
@@ -267,7 +267,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
        }
 
        host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
-                                                        "power-gpios",
+                                                        "power",
                                                         GPIOD_OUT_HIGH);
        if (IS_ERR(host->global_pwr_gpiod)) {
                dev_err(&pdev->dev, "Invalid power GPIO\n");
@@ -288,11 +288,20 @@ static int octeon_mmc_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(&pdev->dev, "Error populating slots\n");
                        octeon_mmc_set_shared_power(host, 0);
-                       return ret;
+                       goto error;
                }
                i++;
        }
        return 0;
+
+error:
+       for (i = 0; i < CAVIUM_MAX_MMC; i++) {
+               if (host->slot[i])
+                       cvm_mmc_of_slot_remove(host->slot[i]);
+               if (host->slot_pdev[i])
+                       of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+       }
+       return ret;
 }
 
 static int octeon_mmc_remove(struct platform_device *pdev)
index fe3d77267cd6b7803ae287d8b6fc5fc308f95a87..b9cc9599879978972b4c8c96f9dbddc26caebb2f 100644 (file)
@@ -146,6 +146,12 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
        return 0;
 
 error:
+       for (i = 0; i < CAVIUM_MAX_MMC; i++) {
+               if (host->slot[i])
+                       cvm_mmc_of_slot_remove(host->slot[i]);
+               if (host->slot_pdev[i])
+                       of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+       }
        clk_disable_unprepare(host->clk);
        return ret;
 }
index 58b51ba6aabd2de7773209d42aa758cb493c2500..b8aaf0fdb77cf52bf89cd3000a98b77536a6f5a4 100644 (file)
@@ -839,14 +839,14 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                cvm_mmc_reset_bus(slot);
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 0);
-               else
+               else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                break;
 
        case MMC_POWER_UP:
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 1);
-               else
+               else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
                break;
        }
@@ -968,20 +968,15 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
                return -EINVAL;
        }
 
-       mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
-       if (IS_ERR(mmc->supply.vmmc)) {
-               if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
-               /*
-                * Legacy Octeon firmware has no regulator entry, fall-back to
-                * a hard-coded voltage to get a sane OCR.
-                */
+       ret = mmc_regulator_get_supply(mmc);
+       if (ret == -EPROBE_DEFER)
+               return ret;
+       /*
+        * Legacy Octeon firmware has no regulator entry, fall back to
+        * a hard-coded voltage to get a sane OCR.
+        */
+       if (IS_ERR(mmc->supply.vmmc))
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
-       } else {
-               ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
-               if (ret > 0)
-                       mmc->ocr_avail = ret;
-       }
 
        /* Common MMC bindings */
        ret = mmc_of_parse(mmc);
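
mmc_regulator_get_supply() centralizes the vmmc/vqmmc lookup and OCR-mask derivation; only -EPROBE_DEFER needs passing up, and an ERR_PTR-valued mmc->supply.vmmc afterwards simply means "no regulator described". That is why the set_ios() hunks earlier in this file guard the OCR calls:

    if (!IS_ERR(mmc->supply.vmmc))
            mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
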
index 3275d49958120857d899384237bc905ad5fd17a5..61666d2697713a7665191b848084846142644457 100644 (file)
@@ -187,7 +187,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = {
 };
 
 static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
-       .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+       .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
        .ops = &sdhci_iproc_ops,
 };
index 6356781f1cca78190bff46e5225606e0f2113cc7..f7e26b031e768d871ed465cde982005295066e82 100644 (file)
@@ -787,14 +787,6 @@ int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios)
        return ret;
 }
 
-void xenon_clean_phy(struct sdhci_host *host)
-{
-       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-       struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
-
-       kfree(priv->phy_params);
-}
-
 static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
                         const char *phy_name)
 {
@@ -819,11 +811,7 @@ static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
        if (ret)
                return ret;
 
-       ret = xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params);
-       if (ret)
-               xenon_clean_phy(host);
-
-       return ret;
+       return xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params);
 }
 
 int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host)
index 67246655315b02b005b00153abfaa60c67fca7a5..bc1781bb070b7b8b83c0132abb114f6905ffddfb 100644 (file)
@@ -486,7 +486,7 @@ static int xenon_probe(struct platform_device *pdev)
 
        err = xenon_sdhc_prepare(host);
        if (err)
-               goto clean_phy_param;
+               goto err_clk;
 
        err = sdhci_add_host(host);
        if (err)
@@ -496,8 +496,6 @@ static int xenon_probe(struct platform_device *pdev)
 
 remove_sdhc:
        xenon_sdhc_unprepare(host);
-clean_phy_param:
-       xenon_clean_phy(host);
 err_clk:
        clk_disable_unprepare(pltfm_host->clk);
 free_pltfm:
@@ -510,8 +508,6 @@ static int xenon_remove(struct platform_device *pdev)
        struct sdhci_host *host = platform_get_drvdata(pdev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 
-       xenon_clean_phy(host);
-
        sdhci_remove_host(host, 0);
 
        xenon_sdhc_unprepare(host);
index 6e6523ea01ce50389f3b77f52b467efb11305831..73debb42dc2f9991356f59668d18ebd09296f927 100644 (file)
@@ -93,7 +93,6 @@ struct xenon_priv {
 };
 
 int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios);
-void xenon_clean_phy(struct sdhci_host *host);
 int xenon_phy_parse_dt(struct device_node *np,
                       struct sdhci_host *host);
 void xenon_soc_pad_ctrl(struct sdhci_host *host,
index d474378ed810b3c3ab19d8a4e85e77e0eb15511c..b1dd12729f19b29ea8f35886aba02cc986990661 100644 (file)
@@ -202,7 +202,7 @@ static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
        return 0;
 }
 
-const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
        .ecc = nand_ooblayout_ecc_lp_hamming,
        .free = nand_ooblayout_free_lp_hamming,
 };
@@ -4361,7 +4361,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
        /* Initialize the ->data_interface field. */
        ret = nand_init_data_interface(chip);
        if (ret)
-               return ret;
+               goto err_nand_init;
 
        /*
         * Setup the data interface correctly on the chip and controller side.
@@ -4373,7 +4373,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
         */
        ret = nand_setup_data_interface(chip);
        if (ret)
-               return ret;
+               goto err_nand_init;
 
        nand_maf_id = chip->id.data[0];
        nand_dev_id = chip->id.data[1];
@@ -4404,6 +4404,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
        mtd->size = i * chip->chipsize;
 
        return 0;
+
+err_nand_init:
+       /* Free manufacturer priv data. */
+       nand_manufacturer_cleanup(chip);
+
+       return ret;
 }
 EXPORT_SYMBOL(nand_scan_ident);
 
@@ -4574,18 +4580,23 @@ int nand_scan_tail(struct mtd_info *mtd)
 
        /* New bad blocks should be marked in OOB, flash-based BBT, or both */
        if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
-                  !(chip->bbt_options & NAND_BBT_USE_FLASH)))
-               return -EINVAL;
+                  !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
+               ret = -EINVAL;
+               goto err_ident;
+       }
 
        if (invalid_ecc_page_accessors(chip)) {
                pr_err("Invalid ECC page accessors setup\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_ident;
        }
 
        if (!(chip->options & NAND_OWN_BUFFERS)) {
                nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
-               if (!nbuf)
-                       return -ENOMEM;
+               if (!nbuf) {
+                       ret = -ENOMEM;
+                       goto err_ident;
+               }
 
                nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
                if (!nbuf->ecccalc) {
@@ -4608,8 +4619,10 @@ int nand_scan_tail(struct mtd_info *mtd)
 
                chip->buffers = nbuf;
        } else {
-               if (!chip->buffers)
-                       return -ENOMEM;
+               if (!chip->buffers) {
+                       ret = -ENOMEM;
+                       goto err_ident;
+               }
        }
 
        /* Set the internal oob buffer location, just after the page data */
@@ -4842,7 +4855,11 @@ int nand_scan_tail(struct mtd_info *mtd)
                return 0;
 
        /* Build bad block table */
-       return chip->scan_bbt(mtd);
+       ret = chip->scan_bbt(mtd);
+       if (ret)
+               goto err_free;
+       return 0;
+
 err_free:
        if (nbuf) {
                kfree(nbuf->databuf);
@@ -4850,6 +4867,13 @@ err_free:
                kfree(nbuf->ecccalc);
                kfree(nbuf);
        }
+
+err_ident:
+       /* Clean up nand_scan_ident(). */
+
+       /* Free manufacturer priv data. */
+       nand_manufacturer_cleanup(chip);
+
        return ret;
 }
 EXPORT_SYMBOL(nand_scan_tail);
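
nand_scan_ident() and nand_scan_tail() now unwind through labelled cleanup so the manufacturer private data is freed on every failure path. The canonical kernel shape of that error handling, with hypothetical step/undo helpers:

    int foo_setup(void)
    {
            int ret;

            ret = step_a();
            if (ret)
                    return ret;             /* nothing to undo yet */

            ret = step_b();
            if (ret)
                    goto err_undo_a;

            return 0;

    err_undo_a:
            undo_a();                       /* cleanup in reverse order */
            return ret;
    }
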
index 9d5ca0e540b5bc5c9e3be9a6a3b476624e63be14..92e2cf8e9ff9066860973caaf60ecb95e2d4a599 100644 (file)
@@ -6,7 +6,6 @@
  * published by the Free Software Foundation.
  *
  */
-#include <linux/module.h>
 #include <linux/mtd/nand.h>
 #include <linux/sizes.h>
 
index 9cfc4035a420a3eae2d2a451c97f5300f8bc8b80..1e0755997762aa23ee953ad20f2a88d24dc7139c 100644 (file)
@@ -84,6 +84,9 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
                        case 7:
                                chip->ecc_strength_ds = 60;
                                break;
+                       default:
+                               WARN(1, "Could not decode ECC info");
+                               chip->ecc_step_ds = 0;
                        }
                }
        } else {
index 05b6e106520331ddd48f619f798d21c4cfeb2059..49b286c6c10fc85e5ee7e75f4dd10d231c86c73f 100644 (file)
  * byte 1 for other packets in the page (PKT_N, for N > 0)
  * ERR_COUNT_PKT_N is the max error count over all but the first packet.
  */
-#define DECODE_OK_PKT_0(v)     ((v) & BIT(7))
-#define DECODE_OK_PKT_N(v)     ((v) & BIT(15))
 #define ERR_COUNT_PKT_0(v)     (((v) >> 0) & 0x3f)
 #define ERR_COUNT_PKT_N(v)     (((v) >> 8) & 0x3f)
+#define DECODE_FAIL_PKT_0(v)   (((v) & BIT(7)) == 0)
+#define DECODE_FAIL_PKT_N(v)   (((v) & BIT(15)) == 0)
 
 /* Offsets relative to pbus_base */
 #define PBUS_CS_CTRL   0x83c
@@ -193,6 +193,8 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
                                                  chip->ecc.strength);
                if (res < 0)
                        mtd->ecc_stats.failed++;
+               else
+                       mtd->ecc_stats.corrected += res;
 
                bitflips = max(res, bitflips);
                buf += pkt_size;
@@ -202,9 +204,11 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
        return bitflips;
 }
 
-static int decode_error_report(struct tango_nfc *nfc)
+static int decode_error_report(struct nand_chip *chip)
 {
        u32 status, res;
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       struct tango_nfc *nfc = to_tango_nfc(chip->controller);
 
        status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
        if (status & PAGE_IS_EMPTY)
@@ -212,10 +216,14 @@ static int decode_error_report(struct tango_nfc *nfc)
 
        res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
 
-       if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
-               return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+       if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
+               return -EBADMSG;
+
+       /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */
+       mtd->ecc_stats.corrected +=
+               ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
 
-       return -EBADMSG;
+       return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
 }
 
 static void tango_dma_callback(void *arg)
@@ -282,7 +290,7 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
        if (err)
                return err;
 
-       res = decode_error_report(nfc);
+       res = decode_error_report(chip);
        if (res < 0) {
                chip->ecc.read_oob_raw(mtd, chip, page);
                res = check_erased_page(chip, buf);
@@ -663,6 +671,7 @@ static const struct of_device_id tango_nand_ids[] = {
        { .compatible = "sigma,smp8758-nand" },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, tango_nand_ids);
 
 static struct platform_driver tango_nand_driver = {
        .probe  = tango_nand_probe,
index c5fd4259da331b27503644938ab22787e2eea8ae..b44a6aeb346d0404144dde0304a502268de1da91 100644 (file)
@@ -2577,7 +2577,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
                return -1;
 
        ad_info->aggregator_id = aggregator->aggregator_identifier;
-       ad_info->ports = aggregator->num_of_ports;
+       ad_info->ports = __agg_active_ports(aggregator);
        ad_info->actor_key = aggregator->actor_oper_aggregator_key;
        ad_info->partner_key = aggregator->partner_oper_aggregator_key;
        ether_addr_copy(ad_info->partner_system,
index 2be78807fd6e1318487961708394caead76af268..2359478b977f0e008335e51dc8f63adc2dc35087 100644 (file)
@@ -2612,11 +2612,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
        bond_for_each_slave_rcu(bond, slave, iter) {
                unsigned long trans_start = dev_trans_start(slave->dev);
 
+               slave->new_link = BOND_LINK_NOCHANGE;
+
                if (slave->link != BOND_LINK_UP) {
                        if (bond_time_in_interval(bond, trans_start, 1) &&
                            bond_time_in_interval(bond, slave->last_rx, 1)) {
 
-                               slave->link  = BOND_LINK_UP;
+                               slave->new_link = BOND_LINK_UP;
                                slave_state_changed = 1;
 
                                /* primary_slave has no meaning in round-robin
@@ -2643,7 +2645,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
                        if (!bond_time_in_interval(bond, trans_start, 2) ||
                            !bond_time_in_interval(bond, slave->last_rx, 2)) {
 
-                               slave->link  = BOND_LINK_DOWN;
+                               slave->new_link = BOND_LINK_DOWN;
                                slave_state_changed = 1;
 
                                if (slave->link_failure_count < UINT_MAX)
@@ -2674,6 +2676,11 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
                if (!rtnl_trylock())
                        goto re_arm;
 
+               bond_for_each_slave(bond, slave, iter) {
+                       if (slave->new_link != BOND_LINK_NOCHANGE)
+                               slave->link = slave->new_link;
+               }
+
                if (slave_state_changed) {
                        bond_slave_state_change(bond);
                        if (BOND_MODE(bond) == BOND_MODE_XOR)
@@ -4271,10 +4278,10 @@ static int bond_check_params(struct bond_params *params)
        int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
        struct bond_opt_value newval;
        const struct bond_opt_value *valptr;
-       int arp_all_targets_value;
+       int arp_all_targets_value = 0;
        u16 ad_actor_sys_prio = 0;
        u16 ad_user_port_key = 0;
-       __be32 arp_target[BOND_MAX_ARP_TARGETS];
+       __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
        int arp_ip_count;
        int bond_mode   = BOND_MODE_ROUNDROBIN;
        int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
@@ -4501,7 +4508,6 @@ static int bond_check_params(struct bond_params *params)
                arp_validate_value = 0;
        }
 
-       arp_all_targets_value = 0;
        if (arp_all_targets) {
                bond_opt_initstr(&newval, arp_all_targets);
                valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
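
The bonding changes above follow a stage-then-commit discipline: the ARP monitor walks the slaves under RCU, records its verdict in slave->new_link (reset to BOND_LINK_NOCHANGE on every pass), and only copies it into slave->link once rtnl_trylock() succeeds; the bond_check_params() hunk additionally zero-initialises arp_all_targets_value and the arp_target[] array up front so no path reads them before they are filled. A self-contained userspace sketch of the stage-then-commit idea, with hypothetical names and a pthread mutex standing in for RTNL:

    #include <pthread.h>
    #include <stdio.h>

    enum link_state { LINK_NOCHANGE, LINK_UP, LINK_DOWN };

    struct port { enum link_state link, new_link; };

    static pthread_mutex_t heavy_lock = PTHREAD_MUTEX_INITIALIZER;

    static void monitor(struct port *p, int n)
    {
            /* observation pass: stage verdicts, touch nothing live */
            for (int i = 0; i < n; i++)
                    p[i].new_link = (i % 2) ? LINK_UP : LINK_DOWN;

            if (pthread_mutex_trylock(&heavy_lock))
                    return;                 /* busy: retry next pass */

            /* commit pass: apply staged state under the heavy lock */
            for (int i = 0; i < n; i++)
                    if (p[i].new_link != LINK_NOCHANGE)
                            p[i].link = p[i].new_link;
            pthread_mutex_unlock(&heavy_lock);
    }

    int main(void)
    {
            struct port ports[4] = { { LINK_NOCHANGE, LINK_NOCHANGE } };

            monitor(ports, 4);
            for (int i = 0; i < 4; i++)
                    printf("port %d: link=%d\n", i, ports[i].link);
            return 0;
    }
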
index 96046bb12ca17333530f237fddb46438c3298dea..14c0be98e0a4d449aa4122c2db6e9ef6af007c84 100644 (file)
@@ -114,13 +114,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
        return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
-                          int src_port, u16 data)
+static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
+                                        int src_dev, int src_port, u16 data)
 {
        return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
 {
        return -EOPNOTSUPP;
 }
index b0a3b85fc6f8d346f3a7d421b094b491d022dd12..db02bc2fb4b2d634ed454cedfcf57692e9b941c0 100644 (file)
@@ -748,13 +748,13 @@ static int ax_init_dev(struct net_device *dev)
 
        ret = ax_mii_init(dev);
        if (ret)
-               goto out_irq;
+               goto err_out;
 
        ax_NS8390_init(dev, 0);
 
        ret = register_netdev(dev);
        if (ret)
-               goto out_irq;
+               goto err_out;
 
        netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
                    ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
@@ -762,9 +762,6 @@ static int ax_init_dev(struct net_device *dev)
 
        return 0;
 
- out_irq:
-       /* cleanup irq */
-       free_irq(dev->irq, dev);
  err_out:
        return ret;
 }
index b3bc87fe3764e397e4a9ce19518866cb97530771..0a98c369df2045ccbb9fbf7a55af848530a5f464 100644 (file)
@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
                              struct xgbe_ring *ring,
                              struct xgbe_ring_data *rdata)
 {
-       int order, ret;
+       int ret;
 
        if (!ring->rx_hdr_pa.pages) {
                ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
        }
 
        if (!ring->rx_buf_pa.pages) {
-               order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
                ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
-                                      order);
+                                      PAGE_ALLOC_COSTLY_ORDER);
                if (ret)
                        return ret;
        }
index 63f2deec2a52994684fa7a58763f68502265542e..77a1c03255defa77f2c662650d41a1ffc68eb7bb 100644 (file)
@@ -1353,6 +1353,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
                printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
+               err = -EIO;
                goto err_dma;
        }
 
@@ -1366,10 +1367,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * pcibios_set_master to do the needed arch specific settings */
        pci_set_master(pdev);
 
-       err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct atl2_adapter));
-       if (!netdev)
+       if (!netdev) {
+               err = -ENOMEM;
                goto err_alloc_etherdev;
+       }
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
@@ -1408,8 +1410,6 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_sw_init;
 
-       err = -EIO;
-
        netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
index 099b374c1b17bbd8e9cabe68cdc7cd991a258737..5274501428e4fb05850bada0d4ff3cd8a346f59f 100644 (file)
@@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        priv->num_rx_desc_words = params->num_rx_desc_words;
 
        priv->irq0 = platform_get_irq(pdev, 0);
-       if (!priv->is_lite)
+       if (!priv->is_lite) {
                priv->irq1 = platform_get_irq(pdev, 1);
-       priv->wol_irq = platform_get_irq(pdev, 2);
+               priv->wol_irq = platform_get_irq(pdev, 2);
+       } else {
+               priv->wol_irq = platform_get_irq(pdev, 1);
+       }
        if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
                dev_err(&pdev->dev, "invalid interrupts\n");
                ret = -EINVAL;
index eccb3d1b6abb748c14567d0efcdc28a405b64fb5..5f49334dcad5a8c8602cc3aa2e8795b2d489bb43 100644 (file)
@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
        }
 
        /* select a non-FCoE queue */
-       return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+       return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
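
As I read the bnx2x fix, non-FCoE traffic may land on any of the BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos transmit queues, so reducing the fallback hash modulo the ethernet-queue count alone confined all traffic to the first class of service. A tiny worked example with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int num_eth = 8, max_cos = 3, hash = 22;

            /* old: always one of queues 0..7; new: any of 0..23 */
            printf("old=%u new=%u\n", hash % num_eth,
                   hash % (num_eth * max_cos));     /* old=6 new=22 */
            return 0;
    }
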
index 38a5c6764bb50f45124c212db37e11d2cc777076..77ed2f628f9ca23854ae8b062ff919ce6d2e3425 100644 (file)
@@ -2196,10 +2196,14 @@ static int cxgb_up(struct adapter *adap)
                if (err)
                        goto irq_err;
        }
+
+       mutex_lock(&uld_mutex);
        enable_rx(adap);
        t4_sge_start(adap);
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;
+       mutex_unlock(&uld_mutex);
+
        notify_ulds(adap, CXGB4_STATE_UP);
 #if IS_ENABLED(CONFIG_IPV6)
        update_clip(adap);
@@ -2771,6 +2775,9 @@ void t4_fatal_err(struct adapter *adap)
 {
        int port;
 
+       if (pci_channel_offline(adap->pdev))
+               return;
+
        /* Disable the SGE since ULDs are going to free resources that
         * could be exposed to the adapter.  RDMA MWs for example...
         */
@@ -3882,9 +3889,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
        spin_lock(&adap->stats_lock);
        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
-
-               netif_device_detach(dev);
-               netif_carrier_off(dev);
+               if (dev) {
+                       netif_device_detach(dev);
+                       netif_carrier_off(dev);
+               }
        }
        spin_unlock(&adap->stats_lock);
        disable_interrupts(adap);
@@ -3963,12 +3971,13 @@ static void eeh_resume(struct pci_dev *pdev)
        rtnl_lock();
        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
-
-               if (netif_running(dev)) {
-                       link_start(dev);
-                       cxgb_set_rxmode(dev);
+               if (dev) {
+                       if (netif_running(dev)) {
+                               link_start(dev);
+                               cxgb_set_rxmode(dev);
+                       }
+                       netif_device_attach(dev);
                }
-               netif_device_attach(dev);
        }
        rtnl_unlock();
 }
index aded42b96f6d966ba7e814c0a89c738968a655b6..3a34aa629f7dd81a56e6b5c1c63bfdeb685c3f25 100644 (file)
@@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter)
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-       u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
-       u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+       u32 whoami, pf;
+
+       if (pci_channel_offline(adapter->pdev))
+               return;
+
+       whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+       pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
                        SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
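
The cxgb4 hunks above share one defensive theme for EEH error recovery: once pci_channel_offline() reports the channel dead, MMIO reads return all-ones and writes are dropped, so t4_fatal_err() and t4_intr_disable() now bail out early instead of acting on garbage; the per-port loops tolerate NULL netdevs mid-recovery, and cxgb_up() serialises its enable sequence against ULD registration with uld_mutex. The offline guard, reduced to a sketch with a hypothetical adapter layout and a made-up register offset:

    #include <linux/io.h>
    #include <linux/pci.h>

    struct demo_adapter {
            struct pci_dev *pdev;
            void __iomem *regs;
    };

    static void demo_intr_disable(struct demo_adapter *adap)
    {
            /* MMIO is unusable while EEH recovery is in progress */
            if (pci_channel_offline(adap->pdev))
                    return;

            writel(0, adap->regs + 0x30);   /* 0x30: made-up offset */
    }
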
index 3549d387627888a2629b5f07dd1b001d2db1fc70..f2d623a7aee04e21f1e4e52645d66788a59341ab 100644 (file)
@@ -37,7 +37,7 @@
 
 #define T4FW_VERSION_MAJOR 0x01
 #define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2B
+#define T4FW_VERSION_MICRO 0x2D
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
 
 #define T5FW_VERSION_MAJOR 0x01
 #define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2B
+#define T5FW_VERSION_MICRO 0x2D
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
 
 #define T6FW_VERSION_MAJOR 0x01
 #define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2B
+#define T6FW_VERSION_MICRO 0x2D
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
index f3a09ab559004b80106e957674cb0d6acbf3ea65..4eee18ce9be46b5e1dc35167b51af634a2070e60 100644 (file)
@@ -5078,9 +5078,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
        struct be_adapter *adapter = netdev_priv(dev);
        u8 l4_hdr = 0;
 
-       /* The code below restricts offload features for some tunneled packets.
+       /* The code below restricts offload features for some tunneled and
+        * Q-in-Q packets.
         * Offload features for normal (non tunnel) packets are unchanged.
         */
+       features = vlan_features_check(skb, features);
        if (!skb->encapsulation ||
            !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
                return features;
index e863ba74d005d7f255931b336825df2abadd2fc8..8bb0db990c8fcf8258201f1af5fcf3fa9976b5f9 100644 (file)
@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev)
        if (ret)
                return ret;
 
+       napi_enable(&priv->napi);
+
        ethoc_init_ring(priv, dev->mem_start);
        ethoc_reset(priv);
 
@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev)
        priv->old_duplex = -1;
 
        phy_start(dev->phydev);
-       napi_enable(&priv->napi);
 
        if (netif_msg_ifup(priv)) {
                dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
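
The ethoc change is purely an ordering fix: ethoc_reset() enables device interrupts, so NAPI must already be enabled when the first interrupt fires, or napi_schedule() is issued against a disabled context and the device can neither send nor receive until reopened. A toy model of the lost wakeup:

    #include <stdbool.h>
    #include <stdio.h>

    static bool napi_enabled;
    static bool poll_scheduled;

    static void irq_handler(void)
    {
            if (napi_enabled)
                    poll_scheduled = true;  /* napi_schedule() */
    }

    int main(void)
    {
            /* buggy order: interrupt fires before NAPI is enabled */
            irq_handler();
            napi_enabled = true;
            printf("buggy: scheduled=%d\n", poll_scheduled);  /* 0 */

            /* fixed order: enable first, then the IRQ is honoured */
            poll_scheduled = false;
            irq_handler();
            printf("fixed: scheduled=%d\n", poll_scheduled);  /* 1 */
            return 0;
    }
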
index 56a563f90b0bf51d189dac8eb7da14791cc2b078..f7c8649fd28f695a1ff0519a85491bf85ec11f0b 100644 (file)
@@ -3192,7 +3192,7 @@ static int fec_reset_phy(struct platform_device *pdev)
 {
        int err, phy_reset;
        bool active_high = false;
-       int msec = 1;
+       int msec = 1, phy_post_delay = 0;
        struct device_node *np = pdev->dev.of_node;
 
        if (!np)
@@ -3209,6 +3209,11 @@ static int fec_reset_phy(struct platform_device *pdev)
        else if (!gpio_is_valid(phy_reset))
                return 0;
 
+       err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
+       /* valid reset duration should be less than 1s */
+       if (!err && phy_post_delay > 1000)
+               return -EINVAL;
+
        active_high = of_property_read_bool(np, "phy-reset-active-high");
 
        err = devm_gpio_request_one(&pdev->dev, phy_reset,
@@ -3226,6 +3231,15 @@ static int fec_reset_phy(struct platform_device *pdev)
 
        gpio_set_value_cansleep(phy_reset, !active_high);
 
+       if (!phy_post_delay)
+               return 0;
+
+       if (phy_post_delay > 20)
+               msleep(phy_post_delay);
+       else
+               usleep_range(phy_post_delay * 1000,
+                            phy_post_delay * 1000 + 1000);
+
        return 0;
 }
 #else /* CONFIG_OF */
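
The new phy-reset-post-delay property follows the kernel's usual delay-selection rule: msleep() has jiffy resolution and is fine for long waits, while short waits go through usleep_range(), whose min/max window gives the scheduler room to coalesce hrtimers; values above one second are rejected as implausible for a reset line. The choice in isolation:

    #include <linux/delay.h>

    static void post_reset_delay(unsigned int ms)
    {
            if (ms > 20)
                    msleep(ms);     /* jiffy resolution is fine here */
            else
                    usleep_range(ms * 1000, ms * 1000 + 1000);
    }
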
index 446c7b374ff5c36712d5813cf94b4e9b4ca0b01e..a10de1e9c157d2590eb19122f27fd5dda1a4816b 100644 (file)
@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
        const struct of_device_id *id =
                of_match_device(fsl_pq_mdio_match, &pdev->dev);
-       const struct fsl_pq_mdio_data *data = id->data;
+       const struct fsl_pq_mdio_data *data;
        struct device_node *np = pdev->dev.of_node;
        struct resource res;
        struct device_node *tbi;
@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
        struct mii_bus *new_bus;
        int err;
 
+       if (!id) {
+               dev_err(&pdev->dev, "Failed to match device\n");
+               return -ENODEV;
+       }
+
+       data = id->data;
+
        dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
        new_bus = mdiobus_alloc_size(sizeof(*priv));
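
The fsl_pq_mdio hunk removes an unconditional dereference: of_match_device() can return NULL, so id must be checked before id->data is used. The defensive shape, with a hypothetical match table:

    #include <linux/errno.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static const struct of_device_id demo_match[] = {
            { .compatible = "vendor,demo-mdio", .data = "demo-cfg" },
            { /* sentinel */ }
    };

    static int demo_probe(struct platform_device *pdev)
    {
            const struct of_device_id *id =
                    of_match_device(demo_match, &pdev->dev);

            if (!id)                /* bound without an OF match */
                    return -ENODEV;

            dev_dbg(&pdev->dev, "cfg: %s\n", (const char *)id->data);
            return 0;
    }
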
index 4f2d329dba998308eeb2ddaed52733cb3059f605..a93757c255f77445e2245ee8b065d2c6ee31cf3f 100644 (file)
@@ -81,7 +81,7 @@
 static const char ibmvnic_driver_name[] = "ibmvnic";
 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
 
-MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
+MODULE_AUTHOR("Santiago Leon");
 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
index d5c9c9e06ff57e21c1e28d09b9eea18af6e22f1a..150caf6ca2b4bb1da5e0ea63f37fd086c058efad 100644 (file)
@@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
  **/
 void i40e_service_event_schedule(struct i40e_pf *pf)
 {
-       if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
+       if (!test_bit(__I40E_DOWN, pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
                queue_work(i40e_wq, &pf->service_task);
 }
@@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
                 * this is not a performance path and napi_schedule()
                 * can deal with rescheduling.
                 */
-               if (!test_bit(__I40E_VSI_DOWN, pf->state))
+               if (!test_bit(__I40E_DOWN, pf->state))
                        napi_schedule_irqoff(&q_vector->napi);
        }
 
@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 enable_intr:
        /* re-enable interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-       if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
+       if (!test_bit(__I40E_DOWN, pf->state)) {
                i40e_service_event_schedule(pf);
                i40e_irq_dynamic_enable_icr0(pf, false);
        }
@@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
 {
 
        /* if interface is down do nothing */
-       if (test_bit(__I40E_VSI_DOWN, pf->state))
+       if (test_bit(__I40E_DOWN, pf->state))
                return;
 
        if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
@@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
        int i;
 
        /* if interface is down do nothing */
-       if (test_bit(__I40E_VSI_DOWN, pf->state) ||
+       if (test_bit(__I40E_DOWN, pf->state) ||
            test_bit(__I40E_CONFIG_BUSY, pf->state))
                return;
 
@@ -6399,9 +6399,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
                reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
        }
-       if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
-               reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
-               clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
+       if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
+               reset_flags |= BIT(__I40E_DOWN_REQUESTED);
+               clear_bit(__I40E_DOWN_REQUESTED, pf->state);
        }
 
        /* If there's a recovery already waiting, it takes
@@ -6415,7 +6415,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 
        /* If we're already down or resetting, just bail */
        if (reset_flags &&
-           !test_bit(__I40E_VSI_DOWN, pf->state) &&
+           !test_bit(__I40E_DOWN, pf->state) &&
            !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
                rtnl_lock();
                i40e_do_reset(pf, reset_flags, true);
@@ -7002,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        u32 val;
        int v;
 
-       if (test_bit(__I40E_VSI_DOWN, pf->state))
+       if (test_bit(__I40E_DOWN, pf->state))
                goto clear_recovery;
        dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
 
@@ -9767,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                return -ENODEV;
        }
        if (vsi == pf->vsi[pf->lan_vsi] &&
-           !test_bit(__I40E_VSI_DOWN, pf->state)) {
+           !test_bit(__I40E_DOWN, pf->state)) {
                dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
                return -ENODEV;
        }
@@ -11003,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        pf->next_vsi = 0;
        pf->pdev = pdev;
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
 
        hw = &pf->hw;
        hw->back = pf;
@@ -11293,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * before setting up the misc vector or we get a race and the vector
         * ends up disabled forever.
         */
-       clear_bit(__I40E_VSI_DOWN, pf->state);
+       clear_bit(__I40E_DOWN, pf->state);
 
        /* In case of MSIX we are going to setup the misc vector right here
         * to handle admin queue events etc. In case of legacy and MSI
@@ -11448,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* Unwind what we've done if something failed in the setup */
 err_vsis:
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
        i40e_clear_interrupt_scheme(pf);
        kfree(pf->vsi);
 err_switch_setup:
@@ -11500,7 +11500,7 @@ static void i40e_remove(struct pci_dev *pdev)
 
        /* no more scheduling of any task */
        set_bit(__I40E_SUSPENDED, pf->state);
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
        if (pf->service_timer.data)
                del_timer_sync(&pf->service_timer);
        if (pf->service_task.func)
@@ -11740,7 +11740,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
        struct i40e_hw *hw = &pf->hw;
 
        set_bit(__I40E_SUSPENDED, pf->state);
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
        rtnl_lock();
        i40e_prep_for_reset(pf, true);
        rtnl_unlock();
@@ -11789,7 +11789,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
        int retval = 0;
 
        set_bit(__I40E_SUSPENDED, pf->state);
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
 
        if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
                i40e_enable_mc_magic_wake(pf);
@@ -11841,7 +11841,7 @@ static int i40e_resume(struct pci_dev *pdev)
 
        /* handling the reset will rebuild the device state */
        if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
-               clear_bit(__I40E_VSI_DOWN, pf->state);
+               clear_bit(__I40E_DOWN, pf->state);
                rtnl_lock();
                i40e_reset_and_rebuild(pf, false, true);
                rtnl_unlock();
index 29321a6167a6675757e74ab4e3cd1a100cb423b0..cd894f4023b1b68cc4e202ff7064e63e2f8be031 100644 (file)
@@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = SKB_DATA_ALIGN(size);
+       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+                               SKB_DATA_ALIGN(I40E_SKB_PAD + size);
 #endif
        struct sk_buff *skb;
 
index dfe241a12ad0756d10a77b954486d2194a31c5ca..12b02e5305038d55fcee5d2109b4ab534a09b1bb 100644 (file)
@@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = SKB_DATA_ALIGN(size);
+       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+                               SKB_DATA_ALIGN(I40E_SKB_PAD + size);
 #endif
        struct sk_buff *skb;
 
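
The two i40e_build_skb() hunks (the PF and VF drivers carry identical copies of this routine) fix truesize accounting for the PAGE_SIZE >= 8192 case: the buffer really holds headroom plus packet data plus a trailing struct skb_shared_info, so reporting SKB_DATA_ALIGN(size) alone understates how much memory the skb pins. A worked example, assuming a 64-byte cache line, 64 bytes of I40E_SKB_PAD and a 320-byte shared-info block (typical values, all configuration-dependent):

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int shinfo = 320, pad = 64, size = 1536;

            printf("old truesize = %u\n", ALIGN_UP(size, 64));  /* 1536 */
            printf("new truesize = %u\n",
                   ALIGN_UP(shinfo, 64) + ALIGN_UP(pad + size, 64)); /* 1920 */
            return 0;
    }
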
index ae5fdc2df65412afce4e72b8384c9c4b3302c56e..ffbcb27c05e55f43630a812249bab21609886dd9 100644 (file)
@@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev,
                qpn = priv->drop_qp.qpn;
        else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
                qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
-               if (qpn < priv->rss_map.base_qpn ||
-                   qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
-                       en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
-                       return -EINVAL;
-               }
        } else {
                if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
                        en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
index 1a670b68155550fe9f61fb2bc2c7a3688391249c..0710b367746468f1d4faeb5b8a8f3266ca941674 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/etherdevice.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 #include <linux/export.h>
 
 #include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
 
+       if (!mlx4_qp_lookup(dev, rule->qpn)) {
+               mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+               ret = -EINVAL;
+               goto out;
+       }
+
        trans_rule_ctrl_to_hw(rule, mailbox->buf);
 
        size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
 
        list_for_each_entry(cur, &rule->list, list) {
                ret = parse_trans_rule(dev, cur, mailbox->buf + size);
-               if (ret < 0) {
-                       mlx4_free_cmd_mailbox(dev, mailbox);
-                       return ret;
-               }
+               if (ret < 0)
+                       goto out;
+
                size += ret;
        }
 
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
                }
        }
 
+out:
        mlx4_free_cmd_mailbox(dev, mailbox);
 
        return ret;
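
The mlx4_flow_attach() rework above is a classic single-exit conversion: the new QP-existence check and the per-rule parse failures all funnel through one out: label, so the command mailbox is freed on every path exactly once. The shape, as a runnable toy:

    #include <stdio.h>
    #include <stdlib.h>

    static int attach(int fail_step)
    {
            int ret = 0;
            char *mailbox = malloc(64);     /* stands in for the cmd mailbox */

            if (!mailbox)
                    return -12;             /* -ENOMEM */
            if (fail_step == 1) { ret = -22; goto out; }    /* -EINVAL */
            if (fail_step == 2) { ret = -5;  goto out; }    /* -EIO    */
    out:
            free(mailbox);                  /* one cleanup path for all */
            return ret;
    }

    int main(void)
    {
            printf("%d %d %d\n", attach(0), attach(1), attach(2));
            return 0;
    }
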
index 2d6abd4662b143612769ef9b91783249bcd2ac8b..5a310d313e94d08d035c265e6b538c27dcf957c3 100644 (file)
@@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
                __mlx4_qp_free_icm(dev, qpn);
 }
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+       struct mlx4_qp *qp;
+
+       spin_lock(&qp_table->lock);
+
+       qp = __mlx4_qp_lookup(dev, qpn);
+
+       spin_unlock(&qp_table->lock);
+       return qp;
+}
+
 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
        }
 
        if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+               if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
+                       mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
+                       err = -EOPNOTSUPP;
+                       goto out;
+               }
+
                qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
                cmd->qp_context.qos_vport = params->qos_vport;
        }
index 07516545474f3ac76e750aaa4af2532b6ac81207..812783865205715e8e88ad66d4ccbfe7172ec6e5 100644 (file)
@@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 }
 
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+                          struct mlx4_vf_immed_vlan_work *work)
+{
+       ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+       ctx->qp_context.qos_vport = work->qos_vport;
+}
+
 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 {
        struct mlx4_vf_immed_vlan_work *work =
@@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                                        qp->sched_queue & 0xC7;
                                upd_context->qp_context.pri_path.sched_queue |=
                                        ((work->qos & 0x7) << 3);
-                               upd_context->qp_mask |=
-                                       cpu_to_be64(1ULL <<
-                                                   MLX4_UPD_QP_MASK_QOS_VPP);
-                               upd_context->qp_context.qos_vport =
-                                       work->qos_vport;
+
+                               if (dev->caps.flags2 &
+                                   MLX4_DEV_CAP_FLAG2_QOS_VPP)
+                                       update_qos_vpp(upd_context, work);
                        }
 
                        err = mlx4_cmd(dev, mailbox->dma,
index 5bdaf3d545b2fc656a318d5b562f940e14ecd9d9..10d282841f5be16c0957c72111c68baf6a452ca9 100644 (file)
@@ -774,7 +774,7 @@ static void cb_timeout_handler(struct work_struct *work)
        mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                       mlx5_command_str(msg_to_opcode(ent->in)),
                       msg_to_opcode(ent->in));
-       mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+       mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }
 
 static void cmd_work_handler(struct work_struct *work)
@@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work)
        }
 
        cmd->ent_arr[ent->idx] = ent;
+       set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
        memset(lay, 0, sizeof(*lay));
@@ -825,6 +826,20 @@ static void cmd_work_handler(struct work_struct *work)
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
 
+       /* Skip sending command to fw if internal error */
+       if (pci_channel_offline(dev->pdev) ||
+           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+               u8 status = 0;
+               u32 drv_synd;
+
+               ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
+               MLX5_SET(mbox_out, ent->out, status, status);
+               MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
+
+               mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+               return;
+       }
+
        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
@@ -835,7 +850,7 @@ static void cmd_work_handler(struct work_struct *work)
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
-               mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+               mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
        }
 }
 
@@ -879,7 +894,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
                wait_for_completion(&ent->done);
        } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
                ent->ret = -ETIMEDOUT;
-               mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+               mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
        }
 
        err = ent->ret;
@@ -1375,7 +1390,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
        }
 }
 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
@@ -1395,6 +1410,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
                        struct semaphore *sem;
 
                        ent = cmd->ent_arr[i];
+
+                       /* if we already completed the command, ignore it */
+                       if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
+                                               &ent->state)) {
+                               /* only real completion can free the cmd slot */
+                               if (!forced) {
+                                       mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
+                                                     ent->idx);
+                                       free_ent(cmd, ent->idx);
+                               }
+                               continue;
+                       }
+
                        if (ent->callback)
                                cancel_delayed_work(&ent->cb_timeout_work);
                        if (ent->page_queue)
@@ -1417,7 +1445,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
                                mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
                                              ent->ret, deliv_status_to_str(ent->status), ent->status);
                        }
-                       free_ent(cmd, ent->idx);
+
+                       /* only real completion will free the entry slot */
+                       if (!forced)
+                               free_ent(cmd, ent->idx);
 
                        if (ent->callback) {
                                ds = ent->ts2 - ent->ts1;
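
The mlx5 command-interface series above distinguishes forced completions (timeouts and internal-error simulation) from real hardware completions: the new MLX5_CMD_ENT_STATE_PENDING_COMP bit makes delivery idempotent, and only a real completion may free the command slot, so a late hardware event after a timeout no longer double-frees it. A self-contained toy of that rule, with a made-up entry structure rather than the mlx5 layout:

    #include <stdbool.h>
    #include <stdio.h>

    struct ent { bool pending; };

    static void complete(struct ent *e, bool forced,
                         unsigned int *free_slots, int idx)
    {
            if (e->pending) {
                    e->pending = false;     /* first delivery wins */
                    if (!forced)
                            *free_slots |= 1u << idx;
            } else if (!forced) {
                    /* late real completion: now the slot may go back */
                    *free_slots |= 1u << idx;
            }
    }

    int main(void)
    {
            struct ent e = { .pending = true };
            unsigned int free_slots = 0;

            complete(&e, true, &free_slots, 3);   /* timeout fires first */
            complete(&e, false, &free_slots, 3);  /* hw completion later */
            printf("free slots: %#x\n", free_slots);  /* 0x8, freed once */
            return 0;
    }
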
index 7b1566f0ae58c7c64e3313ed4f6f3fb83bafee5c..66b5fec1531349d14b5324f68a28746cecdf1918 100644 (file)
@@ -1041,6 +1041,8 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
 #define MLX5_IB_GRH_BYTES       40
 #define MLX5_IPOIB_ENCAP_LEN    4
 #define MLX5_GID_SIZE           16
+#define MLX5_IPOIB_PSEUDO_LEN   20
+#define MLX5_IPOIB_HARD_LEN     (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
 
 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct mlx5_cqe64 *cqe,
@@ -1048,6 +1050,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct sk_buff *skb)
 {
        struct net_device *netdev = rq->netdev;
+       char *pseudo_header;
        u8 *dgid;
        u8 g;
 
@@ -1076,8 +1079,11 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);
 
+       /* 20 bytes of ipoib header and 4 for encap existing */
+       pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
+       memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
        skb_reset_mac_header(skb);
-       skb_pull(skb, MLX5_IPOIB_ENCAP_LEN);
+       skb_pull(skb, MLX5_IPOIB_HARD_LEN);
 
        skb->dev = netdev;
 
index 11c27e4fadf6e09400a432c5498749df73265477..ec63158ab64330c939ffa4ca8c001f8134f8e4e2 100644 (file)
@@ -43,6 +43,7 @@
 #include <net/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_tunnel_key.h>
 #include <net/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_csum.h>
 #include <net/vxlan.h>
 #include <net/arp.h>
 #include "en.h"
@@ -384,7 +385,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                if (e->flags & MLX5_ENCAP_ENTRY_VALID)
                        mlx5_encap_dealloc(priv->mdev, e->encap_id);
 
-               hlist_del_rcu(&e->encap_hlist);
+               hash_del_rcu(&e->encap_hlist);
                kfree(e->encap_header);
                kfree(e);
        }
@@ -925,11 +926,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                                struct mlx5e_tc_flow_parse_attr *parse_attr)
 {
        struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
-       int i, action_size, nactions, max_actions, first, last;
+       int i, action_size, nactions, max_actions, first, last, first_z;
        void *s_masks_p, *a_masks_p, *vals_p;
-       u32 s_mask, a_mask, val;
        struct mlx5_fields *f;
        u8 cmd, field_bsize;
+       u32 s_mask, a_mask;
        unsigned long mask;
        void *action;
 
@@ -946,7 +947,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
        for (i = 0; i < ARRAY_SIZE(fields); i++) {
                f = &fields[i];
                /* avoid seeing bits set from previous iterations */
-               s_mask = a_mask = mask = val = 0;
+               s_mask = 0;
+               a_mask = 0;
 
                s_masks_p = (void *)set_masks + f->offset;
                a_masks_p = (void *)add_masks + f->offset;
@@ -981,12 +983,12 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                        memset(a_masks_p, 0, f->size);
                }
 
-               memcpy(&val, vals_p, f->size);
-
                field_bsize = f->size * BITS_PER_BYTE;
+
+               first_z = find_first_zero_bit(&mask, field_bsize);
                first = find_first_bit(&mask, field_bsize);
                last  = find_last_bit(&mask, field_bsize);
-               if (first > 0 || last != (field_bsize - 1)) {
+               if (first > 0 || last != (field_bsize - 1) || first_z < last) {
                        printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
                               mask);
                        return -EOPNOTSUPP;
@@ -1002,11 +1004,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                }
 
                if (field_bsize == 32)
-                       MLX5_SET(set_action_in, action, data, ntohl(val));
+                       MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
                else if (field_bsize == 16)
-                       MLX5_SET(set_action_in, action, data, ntohs(val));
+                       MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
                else if (field_bsize == 8)
-                       MLX5_SET(set_action_in, action, data, val);
+                       MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
 
                action += action_size;
                nactions++;
@@ -1109,6 +1111,28 @@ out_err:
        return err;
 }
 
+static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+{
+       u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
+                        TCA_CSUM_UPDATE_FLAG_UDP;
+
+       /*  The HW recalcs checksums only if re-writing headers */
+       if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+               netdev_warn(priv->netdev,
+                           "TC csum action is only offloaded with pedit\n");
+               return false;
+       }
+
+       if (update_flags & ~prot_flags) {
+               netdev_warn(priv->netdev,
+                           "can't offload TC csum action for some header/s - flags %#x\n",
+                           update_flags);
+               return false;
+       }
+
+       return true;
+}
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
                                struct mlx5e_tc_flow *flow)
@@ -1149,6 +1173,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        continue;
                }
 
+               if (is_tcf_csum(a)) {
+                       if (csum_offload_supported(priv, attr->action,
+                                                  tcf_csum_update_flags(a)))
+                               continue;
+
+                       return -EOPNOTSUPP;
+               }
+
                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);
 
@@ -1651,6 +1683,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        continue;
                }
 
+               if (is_tcf_csum(a)) {
+                       if (csum_offload_supported(priv, attr->action,
+                                                  tcf_csum_update_flags(a)))
+                               continue;
+
+                       return -EOPNOTSUPP;
+               }
+
                if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev, *encap_dev = NULL;
index ea5d8d37a75c465cf022a4c29d89918802f97bbb..33eae5ad2fb09efe302e2b892c3c9e30d4b117f5 100644 (file)
@@ -422,7 +422,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
                        break;
 
                case MLX5_EVENT_TYPE_CMD:
-                       mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
+                       mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
                        break;
 
                case MLX5_EVENT_TYPE_PORT_CHANGE:
index d0515391d33bbc57961311f648ed5ef25f457c28..44f59b1d6f0f27f7bb4f818b11f341af28ba09dc 100644 (file)
@@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev)
        spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
 
        mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
-       mlx5_cmd_comp_handler(dev, vector);
+       mlx5_cmd_comp_handler(dev, vector, true);
        return;
 
 no_trig:
index 0c123d571b4cf52a0eb3b833208156766b5de6be..af945edfee1905dbe676218cb53123535a37171f 100644 (file)
@@ -612,7 +612,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        struct mlx5_priv *priv  = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-       int err;
 
        if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
                mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -622,18 +621,11 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
                        priv->irq_info[i].mask);
 
-       err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
-       if (err) {
-               mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
-                              irq);
-               goto err_clear_mask;
-       }
+       if (IS_ENABLED(CONFIG_SMP) &&
+           irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+               mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
 
        return 0;
-
-err_clear_mask:
-       free_cpumask_var(priv->irq_info[i].mask);
-       return err;
 }
 
 static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
index 537d1236a4fec0a2973d52ed34cf7f73d8a4b052..715b3aaf83ac4d65cdea4eb15f6f2089e402c469 100644 (file)
@@ -1730,7 +1730,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
                qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
                break;
        default:
-               DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+               DP_VERBOSE(cdev, QED_MSG_SP,
+                          "Invalid protocol type = %d\n", type);
                return;
        }
 }
index 7245b1072518fff31566c471b6eb32b512e41846..81312924df1407092fd1dd43cc0555d16976160b 100644 (file)
@@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops {
        u32 (*get_cap_size)(void *, int);
        void (*set_sys_info)(void *, int, u32);
        void (*store_cap_mask)(void *, u32);
+       bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
+       bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
 };
 
 extern struct qlcnic_nic_template qlcnic_vf_ops;
 
-static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
 {
        return adapter->ahw->extra_capability[0] &
               QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
 }
 
-static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
 {
        return adapter->ahw->extra_capability[0] &
               QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
 }
 
+static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+       return false;
+}
+
+static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+        return false;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+        return adapter->ahw->hw_ops->encap_rx_offload(adapter);
+}
+
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+        return adapter->ahw->hw_ops->encap_tx_offload(adapter);
+}
+
 static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
 {
        return adapter->nic_ops->start_firmware(adapter);
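
qlcnic_encap_{rx,tx}_offload() used to test an 83xx-only firmware capability word for every adapter generation; the header change above turns them into hw_ops indirections, which the 83xx, 82xx and SR-IOV VF tables in the following hunks then fill in (82xx hard-codes false). The dispatch pattern in miniature, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    struct hw_ops {
            bool (*encap_rx_offload)(void);
    };

    static bool gen2_encap_rx(void) { return false; }  /* older silicon */
    static bool gen3_encap_rx(void) { return true;  }  /* newer silicon */

    int main(void)
    {
            const struct hw_ops gen2 = { gen2_encap_rx };
            const struct hw_ops gen3 = { gen3_encap_rx };

            /* callers stay generation-agnostic */
            printf("gen2=%d gen3=%d\n", gen2.encap_rx_offload(),
                   gen3.encap_rx_offload());
            return 0;
    }
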
index 4fb68797630e9531e7ffc4e7bc7c015313a44063..f7080d0ab8746263c6955163640964eac2fd2cb0 100644 (file)
@@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
        .get_cap_size                   = qlcnic_83xx_get_cap_size,
        .set_sys_info                   = qlcnic_83xx_set_sys_info,
        .store_cap_mask                 = qlcnic_83xx_store_cap_mask,
+       .encap_rx_offload               = qlcnic_83xx_encap_rx_offload,
+       .encap_tx_offload               = qlcnic_83xx_encap_tx_offload,
 };
 
 static struct qlcnic_nic_template qlcnic_83xx_ops = {
index 838cc0ceafd8d0824495206bf30cdaf53f98bc59..7848cf04b29a83f0a356b3eb5360d6a6e65871f0 100644 (file)
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
                        }
                        return -EIO;
                }
-               usleep_range(1000, 1500);
+               udelay(1200);
        }
 
        if (id_reg)
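
The qlcnic_pcie_sem_lock() switch from usleep_range() to udelay() reads like a pessimisation but is the usual fix for a sleep-in-atomic bug: this poll loop can be reached in atomic context, where sleeping primitives are forbidden, so the wait must busy-spin. The rule, as an illustrative helper:

    #include <linux/delay.h>
    #include <linux/types.h>

    static void poll_pause(bool may_sleep)
    {
            if (may_sleep)
                    usleep_range(1000, 1500);  /* process context only  */
            else
                    udelay(1200);              /* atomic-safe busy wait */
    }
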
index b6628aaa6e4a45a8eaecd9d5fc4ea8136d7a07af..1b5f7d57b6f8fed6a8b232adfdee76b2cbaff13f 100644 (file)
@@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
        .get_cap_size                   = qlcnic_82xx_get_cap_size,
        .set_sys_info                   = qlcnic_82xx_set_sys_info,
        .store_cap_mask                 = qlcnic_82xx_store_cap_mask,
+       .encap_rx_offload               = qlcnic_82xx_encap_rx_offload,
+       .encap_tx_offload               = qlcnic_82xx_encap_tx_offload,
 };
 
 static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
index 2f656f395f39699e4cec53f4ff25ea7e745d1041..c58180f408448e9a86a7ce40c6fb286063a6afb0 100644 (file)
@@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
        .free_mac_list                  = qlcnic_sriov_vf_free_mac_list,
        .enable_sds_intr                = qlcnic_83xx_enable_sds_intr,
        .disable_sds_intr               = qlcnic_83xx_disable_sds_intr,
+       .encap_rx_offload               = qlcnic_83xx_encap_rx_offload,
+       .encap_tx_offload               = qlcnic_83xx_encap_tx_offload,
 };
 
 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
index cc065ffbe4b5584a6498237d1e4a929ff1d6ebd0..bcd4708b374574fb06faf28d9b0a6cc90bc9c56d 100644 (file)
@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt)
        emac_mac_config(adpt);
        emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
 
-       adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
+       adpt->phydev->irq = PHY_POLL;
        ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
                                 PHY_INTERFACE_MODE_SGMII);
        if (ret) {
index 441c1936648993fa394e5c676b7bd9957e52efa3..18461fcb981501efd7015634999cb787041c01a7 100644 (file)
 /* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
  */
 
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_net.h>
 #include <linux/of_mdio.h>
 #include <linux/phy.h>
 #include <linux/iopoll.h>
 #include <linux/acpi.h>
 #include "emac.h"
-#include "emac-mac.h"
 
 /* EMAC base register offsets */
 #define EMAC_MDIO_CTRL                                        0x001414
 
 #define MDIO_WAIT_TIMES                                           1000
 
-#define EMAC_LINK_SPEED_DEFAULT (\
-               EMAC_LINK_SPEED_10_HALF  |\
-               EMAC_LINK_SPEED_10_FULL  |\
-               EMAC_LINK_SPEED_100_HALF |\
-               EMAC_LINK_SPEED_100_FULL |\
-               EMAC_LINK_SPEED_1GB_FULL)
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The autopoll feature takes over the MDIO bus.  In order for
- * the PHY driver to be able to talk to the PHY over the MDIO
- * bus, we need to temporarily disable the autopoll feature.
- */
-static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
-{
-       u32 val;
-
-       /* disable autopoll */
-       emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
-
-       /* wait for any mdio polling to complete */
-       if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
-                               !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
-               return 0;
-
-       /* failed to disable; ensure it is enabled before returning */
-       emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-
-       return -EBUSY;
-}
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The EMAC has the ability to poll the external PHY on the MDIO
- * bus for link state changes.  This eliminates the need for the
- * driver to poll the phy.  If if the link state does change,
- * the EMAC issues an interrupt on behalf of the PHY.
- */
-static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
-{
-       emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-}
-
 static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 {
        struct emac_adapter *adpt = bus->priv;
        u32 reg;
-       int ret;
-
-       ret = emac_phy_mdio_autopoll_disable(adpt);
-       if (ret)
-               return ret;
 
        emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
                          (addr << PHY_ADDR_SHFT));
@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                               !(reg & (MDIO_START | MDIO_BUSY)),
                               100, MDIO_WAIT_TIMES * 100))
-               ret = -EIO;
-       else
-               ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
+               return -EIO;
 
-       emac_phy_mdio_autopoll_enable(adpt);
-
-       return ret;
+       return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
 }
 
 static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
 {
        struct emac_adapter *adpt = bus->priv;
        u32 reg;
-       int ret;
-
-       ret = emac_phy_mdio_autopoll_disable(adpt);
-       if (ret)
-               return ret;
 
        emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
                          (addr << PHY_ADDR_SHFT));
@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                               !(reg & (MDIO_START | MDIO_BUSY)), 100,
                               MDIO_WAIT_TIMES * 100))
-               ret = -EIO;
+               return -EIO;
 
-       emac_phy_mdio_autopoll_enable(adpt);
-
-       return ret;
+       return 0;
 }
 
 /* Configure the MDIO bus and connect the external PHY */
index 28a8cdc364851e56a5757a8f2970853c0a462cc4..98a326faea294eec0c59f9b0ffe15d57046ce5eb 100644 (file)
 #define DMAR_DLY_CNT_DEF                                   15
 #define DMAW_DLY_CNT_DEF                                    4
 
-#define IMR_NORMAL_MASK         (\
-               ISR_ERROR       |\
-               ISR_GPHY_LINK   |\
-               ISR_TX_PKT      |\
-               GPHY_WAKEUP_INT)
-
-#define IMR_EXTENDED_MASK       (\
-               SW_MAN_INT      |\
-               ISR_OVER        |\
-               ISR_ERROR       |\
-               ISR_GPHY_LINK   |\
-               ISR_TX_PKT      |\
-               GPHY_WAKEUP_INT)
+#define IMR_NORMAL_MASK                (ISR_ERROR | ISR_OVER | ISR_TX_PKT)
 
 #define ISR_TX_PKT      (\
        TX_PKT_INT      |\
        TX_PKT_INT2     |\
        TX_PKT_INT3)
 
-#define ISR_GPHY_LINK        (\
-       GPHY_LINK_UP_INT     |\
-       GPHY_LINK_DOWN_INT)
-
 #define ISR_OVER        (\
        RFD0_UR_INT     |\
        RFD1_UR_INT     |\
@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data)
        if (status & ISR_OVER)
                net_warn_ratelimited("warning: TX/RX overflow\n");
 
-       /* link event */
-       if (status & ISR_GPHY_LINK)
-               phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
-
 exit:
        /* enable the interrupt */
        writel(irq->mask, adpt->base + EMAC_INT_MASK);
index 3cd7989c007dfe46947e2ddb366a904f1af90198..784782da3a85b638e9e2a195fe66b15c72fe0fe5 100644 (file)
@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        int ring_size;
        int i;
 
-       /* Free RX skb ringbuffer */
-       if (priv->rx_skb[q]) {
-               for (i = 0; i < priv->num_rx_ring[q]; i++)
-                       dev_kfree_skb(priv->rx_skb[q][i]);
-       }
-       kfree(priv->rx_skb[q]);
-       priv->rx_skb[q] = NULL;
-
-       /* Free aligned TX buffers */
-       kfree(priv->tx_align[q]);
-       priv->tx_align[q] = NULL;
-
        if (priv->rx_ring[q]) {
                for (i = 0; i < priv->num_rx_ring[q]; i++) {
                        struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
                priv->tx_ring[q] = NULL;
        }
 
+       /* Free RX skb ringbuffer */
+       if (priv->rx_skb[q]) {
+               for (i = 0; i < priv->num_rx_ring[q]; i++)
+                       dev_kfree_skb(priv->rx_skb[q][i]);
+       }
+       kfree(priv->rx_skb[q]);
+       priv->rx_skb[q] = NULL;
+
+       /* Free aligned TX buffers */
+       kfree(priv->tx_align[q]);
+       priv->tx_align[q] = NULL;
+
        /* Free TX skb ringbuffer.
         * SKBs are freed by ravb_tx_free() call above.
         */
index 489ef146201e61c629c17010f672a621642e94b3..6a9c954492f225987d5dc63713034548e7aabdbb 100644 (file)
@@ -37,6 +37,7 @@
 #define TSE_PCS_CONTROL_AN_EN_MASK                     BIT(12)
 #define TSE_PCS_CONTROL_REG                            0x00
 #define TSE_PCS_CONTROL_RESTART_AN_MASK                        BIT(9)
+#define TSE_PCS_CTRL_AUTONEG_SGMII                     0x1140
 #define TSE_PCS_IF_MODE_REG                            0x28
 #define TSE_PCS_LINK_TIMER_0_REG                       0x24
 #define TSE_PCS_LINK_TIMER_1_REG                       0x26
@@ -65,6 +66,7 @@
 #define TSE_PCS_SW_RESET_TIMEOUT                       100
 #define TSE_PCS_USE_SGMII_AN_MASK                      BIT(1)
 #define TSE_PCS_USE_SGMII_ENA                          BIT(0)
+#define TSE_PCS_IF_USE_SGMII                           0x03
 
 #define SGMII_ADAPTER_CTRL_REG                         0x00
 #define SGMII_ADAPTER_DISABLE                          0x0001
@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
 {
        int ret = 0;
 
-       writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
+       writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
+
+       writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
 
        writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
        writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
index a74c481401c46ee659b1244b0124966cabeafdbd..12236daf7bb6d5358fdafe50e37227e19b95bc33 100644 (file)
@@ -1208,7 +1208,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
        u32 rx_count = priv->plat->rx_queues_to_use;
        unsigned int bfsize = 0;
        int ret = -ENOMEM;
-       u32 queue;
+       int queue;
        int i;
 
        if (priv->hw->mode->set_16kib_bfsize)
@@ -2724,7 +2724,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
 
                priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
                        0, 1,
-                       (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+                       (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
                        0, 0);
 
                tmp_len -= TSO_MAX_BUFF_SIZE;
@@ -2947,7 +2947,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        int i, csum_insertion = 0, is_jumbo = 0;
        u32 queue = skb_get_queue_mapping(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
-       unsigned int entry, first_entry;
+       int entry;
+       unsigned int first_entry;
        struct dma_desc *desc, *first;
        struct stmmac_tx_queue *tx_q;
        unsigned int enh_desc;
index dec5d563ab19091cdadd422eed9af8268ce5805e..6ebb0f559a427fdb4d27d9b668b46d7151650043 100644 (file)
@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
 
        /* make enough headroom for basic scenario */
        encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
-       if (ip_tunnel_info_af(info) == AF_INET) {
+       if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
                encap_len += sizeof(struct iphdr);
                dev->max_mtu -= sizeof(struct iphdr);
        } else {
@@ -1293,7 +1293,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
        if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
                goto nla_put_failure;
 
-       if (ip_tunnel_info_af(info) == AF_INET) {
+       if (rtnl_dereference(geneve->sock4)) {
                if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
                                    info->key.u.ipv4.dst))
                        goto nla_put_failure;
@@ -1302,8 +1302,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
                               !!(info->key.tun_flags & TUNNEL_CSUM)))
                        goto nla_put_failure;
 
+       }
+
 #if IS_ENABLED(CONFIG_IPV6)
-       } else {
+       if (rtnl_dereference(geneve->sock6)) {
                if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
                                     &info->key.u.ipv6.dst))
                        goto nla_put_failure;
@@ -1315,8 +1317,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
                if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
                               !geneve->use_udp6_rx_checksums))
                        goto nla_put_failure;
-#endif
        }
+#endif
 
        if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
            nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
index 4fea1b3dfbb4457247c4bc98dc9378591c36ecd3..7b652bb7ebe407b35c054009b521b173fd9fa361 100644 (file)
@@ -873,7 +873,7 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
 
        /* Check if there's an existing gtpX device to configure */
        dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
-       if (dev->netdev_ops == &gtp_netdev_ops)
+       if (dev && dev->netdev_ops == &gtp_netdev_ops)
                gtp = netdev_priv(dev);
 
        put_net(net);
index 8c3633c1d0789718fc528b9873e8295e6ab6b09e..97e3bc60c3e7d111184f4c60ce4afe50757d1398 100644 (file)
@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case HDLCDRVCTL_CALIBRATE:
                if(!capable(CAP_SYS_RAWIO))
                        return -EPERM;
+               if (s->par.bitrate <= 0)
+                       return -EINVAL;
                if (bi.data.calibrate > INT_MAX / s->par.bitrate)
                        return -EINVAL;
                s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
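
The hdlcdrv guard closes a divide-by-zero: the pre-existing overflow check divides INT_MAX by s->par.bitrate, so a zero or negative bitrate has to be rejected first; the order of the two checks matters. Checked in isolation:

    #include <limits.h>
    #include <stdio.h>

    static int calibrate_ticks(int calibrate, int bitrate, int *out)
    {
            if (bitrate <= 0)
                    return -1;      /* would divide by zero below */
            if (calibrate > INT_MAX / bitrate)
                    return -1;      /* multiply would overflow    */
            *out = calibrate * bitrate / 16;
            return 0;
    }

    int main(void)
    {
            int ticks;

            printf("bitrate 0:    %d\n", calibrate_ticks(10, 0, &ticks));
            if (!calibrate_ticks(10, 9600, &ticks))
                    printf("bitrate 9600: ticks=%d\n", ticks);  /* 6000 */
            return 0;
    }
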
index 60ffc9da6a286272d84e8dc3f4326eb908e7961a..c360dd6ead2213b112282ff508b963354cf15d72 100644 (file)
@@ -108,7 +108,7 @@ config MDIO_MOXART
 config MDIO_OCTEON
        tristate "Octeon and some ThunderX SOCs MDIO buses"
        depends on 64BIT
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && OF_MDIO
        select MDIO_CAVIUM
        help
          This module provides a driver for the Octeon and ThunderX MDIO
index 272b051a019975110aa1d117da993cf18cb98816..57297ba239871c631baef1dcfc2c30a66fdf3bba 100644 (file)
@@ -255,34 +255,6 @@ static int marvell_config_aneg(struct phy_device *phydev)
 {
        int err;
 
-       /* The Marvell PHY has an errata which requires
-        * that certain registers get written in order
-        * to restart autonegotiation */
-       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1d, 0x1f);
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1e, 0x200c);
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1d, 0x5);
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1e, 0);
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1e, 0x100);
-       if (err < 0)
-               return err;
-
        err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
        if (err < 0)
                return err;
@@ -316,6 +288,42 @@ static int marvell_config_aneg(struct phy_device *phydev)
        return 0;
 }
 
+static int m88e1101_config_aneg(struct phy_device *phydev)
+{
+       int err;
+
+       /* This Marvell PHY has an erratum that requires
+        * certain registers to be written in order
+        * to restart autonegotiation
+        */
+       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1d, 0x1f);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1e, 0x200c);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1d, 0x5);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1e, 0);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1e, 0x100);
+       if (err < 0)
+               return err;
+
+       return marvell_config_aneg(phydev);
+}
+
 static int m88e1111_config_aneg(struct phy_device *phydev)
 {
        int err;
@@ -1119,8 +1127,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
                if (adv < 0)
                        return adv;
 
-               lpa &= adv;
-
                if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
                        phydev->duplex = DUPLEX_FULL;
                else
@@ -1892,7 +1898,7 @@ static struct phy_driver marvell_drivers[] = {
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
                .config_init = &marvell_config_init,
-               .config_aneg = &marvell_config_aneg,
+               .config_aneg = &m88e1101_config_aneg,
                .read_status = &genphy_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
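Net effect of the three Marvell hunks: the errata register sequence no longer runs for every Marvell PHY on each autonegotiation restart; only the M88E1101 entry points at the new wrapper, which does the writes and then tail-calls the common path. A hypothetical sketch of the per-chip dispatch this sets up:

        /* Hypothetical sketch (fields elided): each phy_driver entry binds
         * its own config_aneg, so errata code runs only where it applies.
         */
        static struct phy_driver dispatch_sketch[] = {
                {
                        /* M88E1101: errata writes, then common path */
                        .config_aneg = &m88e1101_config_aneg,
                },
                {
                        /* other Marvell PHYs: common path only */
                        .config_aneg = &marvell_config_aneg,
                },
        };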
index 8e73f5f36e7120a5aa28b6e0dfb992eca3330e3e..f99c21f78b639fc1e6b984a20383f62021b757eb 100644 (file)
@@ -658,6 +658,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
        return 0;
 }
 
+static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       int rc;
+
+       /* Some devices have extra OF data and an OF-style MODALIAS */
+       rc = of_device_uevent_modalias(dev, env);
+       if (rc != -ENODEV)
+               return rc;
+
+       return 0;
+}
+
 #ifdef CONFIG_PM
 static int mdio_bus_suspend(struct device *dev)
 {
@@ -708,6 +720,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = {
 struct bus_type mdio_bus_type = {
        .name           = "mdio_bus",
        .match          = mdio_bus_match,
+       .uevent         = mdio_uevent,
        .pm             = MDIO_BUS_PM_OPS,
 };
 EXPORT_SYMBOL(mdio_bus_type);
index 6a5fd18f062c4ea400bc8036d787a94fe5108a34..b9252b8d81ffb720272ca5f0b25910c021eb28a3 100644 (file)
@@ -268,23 +268,12 @@ out:
        return ret;
 }
 
-static int kszphy_config_init(struct phy_device *phydev)
+/* Some config bits need to be set again on resume, handle them here. */
+static int kszphy_config_reset(struct phy_device *phydev)
 {
        struct kszphy_priv *priv = phydev->priv;
-       const struct kszphy_type *type;
        int ret;
 
-       if (!priv)
-               return 0;
-
-       type = priv->type;
-
-       if (type->has_broadcast_disable)
-               kszphy_broadcast_disable(phydev);
-
-       if (type->has_nand_tree_disable)
-               kszphy_nand_tree_disable(phydev);
-
        if (priv->rmii_ref_clk_sel) {
                ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
                if (ret) {
@@ -295,11 +284,30 @@ static int kszphy_config_init(struct phy_device *phydev)
        }
 
        if (priv->led_mode >= 0)
-               kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
+               kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
 
        return 0;
 }
 
+static int kszphy_config_init(struct phy_device *phydev)
+{
+       struct kszphy_priv *priv = phydev->priv;
+       const struct kszphy_type *type;
+
+       if (!priv)
+               return 0;
+
+       type = priv->type;
+
+       if (type->has_broadcast_disable)
+               kszphy_broadcast_disable(phydev);
+
+       if (type->has_nand_tree_disable)
+               kszphy_nand_tree_disable(phydev);
+
+       return kszphy_config_reset(phydev);
+}
+
 static int ksz8041_config_init(struct phy_device *phydev)
 {
        struct device_node *of_node = phydev->mdio.dev.of_node;
@@ -700,8 +708,14 @@ static int kszphy_suspend(struct phy_device *phydev)
 
 static int kszphy_resume(struct phy_device *phydev)
 {
+       int ret;
+
        genphy_resume(phydev);
 
+       ret = kszphy_config_reset(phydev);
+       if (ret)
+               return ret;
+
        /* Enable PHY Interrupts */
        if (phy_interrupt_is_valid(phydev)) {
                phydev->interrupts = PHY_INTERRUPT_ENABLED;
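The kszphy reshuffle exists because the reset implied by resume clears the RMII clock-select and LED-mode bits, which previously were only applied in config_init(). A hypothetical sketch of the resulting call structure (illustration only):

        /* Hypothetical sketch: volatile settings live in one helper
         * shared by both paths.
         */
        static int example_config_reset(struct phy_device *phydev)
        {
                /* re-apply settings a reset clears: RMII ref clock, LED mode */
                return 0;
        }

        static int example_config_init(struct phy_device *phydev)
        {
                /* one-time quirks: broadcast disable, NAND-tree disable */
                return example_config_reset(phydev);
        }

        static int example_resume(struct phy_device *phydev)
        {
                genphy_resume(phydev);                  /* may reset the PHY */
                return example_config_reset(phydev);    /* so re-apply bits */
        }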
index 82ab8fb82587553fefc1d05bbff06d4a76bc9679..7524caa0f29d9806e11826c7ecfd57842bff1822 100644 (file)
@@ -241,7 +241,7 @@ static const struct phy_setting settings[] = {
  * phy_lookup_setting - lookup a PHY setting
  * @speed: speed to match
  * @duplex: duplex to match
- * @feature: allowed link modes
+ * @features: allowed link modes
  * @exact: an exact match is required
  *
  * Search the settings array for a setting that matches the speed and
index f3ae88fdf332e890ac8273e3df1ca7dd53092c07..8ab281b478f23bd98d71b896a0c00c4fdba7dacc 100644 (file)
@@ -310,6 +310,26 @@ skip:
                return -ENODEV;
        }
 
+       return 0;
+
+bad_desc:
+       dev_info(&dev->udev->dev, "bad CDC descriptors\n");
+       return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind);
+
+
+/* like usbnet_generic_cdc_bind() but handles filter initialization
+ * correctly
+ */
+int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+       int rv;
+
+       rv = usbnet_generic_cdc_bind(dev, intf);
+       if (rv < 0)
+               goto bail_out;
+
        /* Some devices don't initialise properly. In particular
         * the packet filter is not reset. There are devices that
         * don't do reset all the way. So the packet filter should
@@ -317,13 +337,10 @@ skip:
         */
        usbnet_cdc_update_filter(dev);
 
-       return 0;
-
-bad_desc:
-       dev_info(&dev->udev->dev, "bad CDC descriptors\n");
-       return -ENODEV;
+bail_out:
+       return rv;
 }
-EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind);
+EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind);
 
 void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
@@ -417,7 +434,7 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
        BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
                        < sizeof(struct cdc_state)));
 
-       status = usbnet_generic_cdc_bind(dev, intf);
+       status = usbnet_ether_cdc_bind(dev, intf);
        if (status < 0)
                return status;
 
index 765400b62168436b278d8a09fa0dd1fd8566bcb7..2dfca96a63b60283b89efab676932a711024a499 100644 (file)
@@ -681,7 +681,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
        if (ret < 0)
                return ret;
 
-       if (features & NETIF_F_HW_CSUM)
+       if (features & NETIF_F_IP_CSUM)
                read_buf |= Tx_COE_EN_;
        else
                read_buf &= ~Tx_COE_EN_;
@@ -1279,12 +1279,19 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 
        spin_lock_init(&pdata->mac_cr_lock);
 
+       /* LAN95xx devices do not alter a computed checksum of 0 to 0xffff.
+        * Per RFC 2460, an IPv6 UDP checksum that computes to zero must be
+        * transmitted as 0xffff; per RFC 768, an IPv4 UDP checksum that
+        * computes to zero is transmitted as all ones, since a transmitted
+        * checksum of zero means the transmitter generated none. Hence,
+        * enable checksum offload only for IPv4 packets.
+        */
        if (DEFAULT_TX_CSUM_ENABLE)
-               dev->net->features |= NETIF_F_HW_CSUM;
+               dev->net->features |= NETIF_F_IP_CSUM;
        if (DEFAULT_RX_CSUM_ENABLE)
                dev->net->features |= NETIF_F_RXCSUM;
 
-       dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
 
        smsc95xx_init_mac_address(dev);
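The switch from NETIF_F_HW_CSUM to NETIF_F_IP_CSUM narrows the offload claim to what the hardware can honor. A small hypothetical sketch of the distinction:

        /* Hypothetical sketch: NETIF_F_HW_CSUM advertises checksumming for
         * any protocol; NETIF_F_IP_CSUM covers IPv4 only. Claiming only
         * IPv4 forces IPv6 UDP through the software path, where the
         * mandatory 0 -> 0xffff fixup is guaranteed to happen.
         */
        static netdev_features_t smsc_features_sketch(void)
        {
                return NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
        }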
 
index 9320d96a1632bbebe8bd1d4a04059e0df631ac19..a871f45ecc79a438b2b43465d3719f240ff25cb5 100644 (file)
@@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
        unsigned int len;
 
        len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
-                               rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
+                               rq->min_buf_len, PAGE_SIZE - hdr_len);
        return ALIGN(len, L1_CACHE_BYTES);
 }
 
@@ -1989,6 +1989,7 @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_poll_controller = virtnet_netpoll,
 #endif
        .ndo_xdp                = virtnet_xdp,
+       .ndo_features_check     = passthru_features_check,
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)
@@ -2143,7 +2144,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
        unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
        unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
 
-       return max(min_buf_len, hdr_len);
+       return max(max(min_buf_len, hdr_len) - hdr_len,
+                  (unsigned int)GOOD_PACKET_LEN);
 }
 
 static int virtnet_find_vqs(struct virtnet_info *vi)
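The two virtio-net hunks cooperate: the stored minimum becomes header-free and the clamp stops re-subtracting the header. A worked example with assumed numbers:

        /* Worked example (values assumed, not from the patch):
         * hdr_len = 12, buf_len = 1538, rq_size = 256, and taking
         * GOOD_PACKET_LEN as 1518 (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN):
         *
         *   min_buf_len = DIV_ROUND_UP(1538, 256) = 7
         *   old: rq->min_buf_len = max(7, 12) = 12
         *        clamp floor     = 12 - 12    = 0     (useless)
         *   new: rq->min_buf_len = max(max(7, 12) - 12, 1518) = 1518
         *        clamp floor     = 1518               (a real minimum)
         *
         * i.e. get_mergeable_buf_len() now clamps against a sane
         * header-free packet size instead of an effectively-zero one.
         */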
index 328b4712683c334bf1de66a3a3789d0a04d734c3..a6b5052c1d36bb99260dd4232842fa9e8df2621c 100644 (file)
@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
 
 static int vxlan_sock_add(struct vxlan_dev *vxlan);
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
        struct list_head  vxlan_list;
@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
        call_rcu(&f->rcu, vxlan_fdb_free);
 }
 
+static void vxlan_dst_free(struct rcu_head *head)
+{
+       struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
+
+       dst_cache_destroy(&rd->dst_cache);
+       kfree(rd);
+}
+
+static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+                                 struct vxlan_rdst *rd)
+{
+       list_del_rcu(&rd->list);
+       vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+       call_rcu(&rd->rcu, vxlan_dst_free);
+}
+
 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
                           union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
                           __be32 *vni, u32 *ifindex)
@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
         * otherwise destroy the fdb entry
         */
        if (rd && !list_is_singular(&f->remotes)) {
-               list_del_rcu(&rd->list);
-               vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
-               kfree_rcu(rd, rcu);
+               vxlan_fdb_dst_destroy(vxlan, f, rd);
                goto out;
        }
 
@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
        rcu_assign_pointer(vxlan->vn4_sock, NULL);
        synchronize_net();
 
+       vxlan_vs_del_dev(vxlan);
+
        if (__vxlan_sock_release_prep(sock4)) {
                udp_tunnel_sock_release(sock4->sock);
                kfree(sock4);
@@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg)
        mod_timer(&vxlan->age_timer, next_timer);
 }
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+       spin_lock(&vn->sock_lock);
+       hlist_del_init_rcu(&vxlan->hlist);
+       spin_unlock(&vn->sock_lock);
+}
+
 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 {
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
        vxlan_flush(vxlan, true);
 
-       spin_lock(&vn->sock_lock);
-       if (!hlist_unhashed(&vxlan->hlist))
-               hlist_del_rcu(&vxlan->hlist);
-       spin_unlock(&vn->sock_lock);
-
        gro_cells_destroy(&vxlan->gro_cells);
        list_del(&vxlan->next);
        unregister_netdevice_queue(dev, head);
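Two related vxlan lifetime fixes: rdst teardown gains a real destructor (dst_cache state cannot be released by a bare kfree_rcu), and unhashing moves into vxlan_vs_del_dev() on the sock-release path. A minimal sketch of the kfree_rcu-to-call_rcu conversion, assuming an object with embedded state:

        /* Illustrative sketch: when an object needs more teardown than
         * kfree(), give call_rcu() a destructor that releases the
         * embedded state first.
         */
        struct example_obj {
                struct dst_cache cache;
                struct rcu_head rcu;
        };

        static void example_obj_free(struct rcu_head *head)
        {
                struct example_obj *obj =
                        container_of(head, struct example_obj, rcu);

                dst_cache_destroy(&obj->cache); /* beyond kfree_rcu()'s reach */
                kfree(obj);
        }

        /* after unlinking obj from all RCU-visible lists:
         *      call_rcu(&obj->rcu, example_obj_free);
         */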
index d5e993dc9b238c6fd306b801083346570eec8790..517a315e259b79f05f2d8d3e88c371a9f39ad9e9 100644 (file)
@@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
        qcom_smem_state_put(wcn->tx_enable_state);
        qcom_smem_state_put(wcn->tx_rings_empty_state);
 
+       rpmsg_destroy_ept(wcn->smd_channel);
+
        iounmap(wcn->dxe_base);
        iounmap(wcn->ccu_base);
 
index fc64b8913aa6a11c0111fec3b9d900174dc250c3..e03450059b06c0bfe510148f985c19668bcd3dff 100644 (file)
@@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
                /* otherwise, set txglomalign */
                value = sdiodev->settings->bus.sdio.sd_sgentry_align;
                /* SDIO ADMA requires at least 32 bit alignment */
-               value = max_t(u32, value, 4);
+               value = max_t(u32, value, ALIGNMENT);
                err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
                                           sizeof(u32));
        }
index 3b3e076571d6d7089ae2df09d2dfbd2008cf81e4..45e2efc70d19e5f44c7a5e2a1cde2cf9a7448f91 100644 (file)
@@ -79,8 +79,8 @@
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  17
 #define IWL7265_UCODE_API_MIN  17
-#define IWL7265D_UCODE_API_MIN 17
-#define IWL3168_UCODE_API_MIN  20
+#define IWL7265D_UCODE_API_MIN 22
+#define IWL3168_UCODE_API_MIN  22
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
index b9718c0cf17480dc4c1ab212fdbfabecb3125775..89137717c1fce778d63a55af99f16f970f3b34d6 100644 (file)
@@ -74,8 +74,8 @@
 #define IWL8265_UCODE_API_MAX  30
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  17
-#define IWL8265_UCODE_API_MIN  20
+#define IWL8000_UCODE_API_MIN  22
+#define IWL8265_UCODE_API_MIN  22
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION            0x0a1d
index 306bc967742ee9a7b5629bab00f20cab77309fa1..77efbb78e867bfac47ae992c2703275e1b5062b2 100644 (file)
 #define MON_DMARB_RD_DATA_ADDR         (0xa03c5c)
 
 #define DBGC_IN_SAMPLE                 (0xa03c00)
+#define DBGC_OUT_CTRL                  (0xa03c0c)
 
 /* enable the ID buf for read */
 #define WFPM_PS_CTL_CLR                        0xA0300C
index 1b7d265ffb0acb476228b5b2a10040c09e41d61c..a10c6aae9ab98de7c752b05b9cc33059337b1410 100644 (file)
@@ -307,6 +307,11 @@ enum {
 /* Bit 1-3: LQ command color. Used to match responses to LQ commands */
 #define LQ_FLAG_COLOR_POS               1
 #define LQ_FLAG_COLOR_MSK               (7 << LQ_FLAG_COLOR_POS)
+#define LQ_FLAG_COLOR_GET(_f)          (((_f) & LQ_FLAG_COLOR_MSK) >>\
+                                        LQ_FLAG_COLOR_POS)
+#define LQ_FLAGS_COLOR_INC(_c)         ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
+                                        LQ_FLAG_COLOR_MSK)
+#define LQ_FLAG_COLOR_SET(_f, _c)      ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
 
 /* Bit 4-5: Tx RTS BW Signalling
  * (0) No RTS BW signalling
index 81b98915b1a42e21a318d293c459015688489c7f..1360ebfdc51bc6475c2c68301781b2296bb03369 100644 (file)
@@ -519,8 +519,11 @@ struct agg_tx_status {
  * bit-7 invalid rate indication
  */
 #define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_POS 4
 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70
 #define TX_RES_INV_RATE_INDEX_MSK 0x80
+#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
+                                      TX_RES_RATE_TABLE_COLOR_POS)
 
 #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
 #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
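A worked example for the new accessor, with the input value assumed:

        /* Worked example: with tlc_info = 0x35,
         *   0x35 & TX_RES_RATE_TABLE_COLOR_MSK (0x70) = 0x30
         *   0x30 >> TX_RES_RATE_TABLE_COLOR_POS (4)   = 3
         * so TX_RES_RATE_TABLE_COL_GET(0x35) == 3, the color the
         * firmware stamped into the TX response.
         */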
index 7b86a4f1b574c6f507fbde87c1276241fd534a4a..c8712e6eea74187af9c0d8ef3a73622ab7d1d5fd 100644 (file)
@@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
        return 0;
 }
 
-static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
-{
-       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
-               iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
-       else
-               iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
-}
-
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 {
        u8 *ptr;
@@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
        /* EARLY START - firmware's configuration is hard coded */
        if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
             !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
-           conf_id == FW_DBG_START_FROM_ALIVE) {
-               iwl_mvm_restart_early_start(mvm);
+           conf_id == FW_DBG_START_FROM_ALIVE)
                return 0;
-       }
 
        if (!mvm->fw->dbg_conf_tlv[conf_id])
                return -EINVAL;
index 0f1831b419159b606967889ff8ea20c7ef86f0f2..fd2fc46e2fe51d8e8f1930af05e40ee9400c84a6 100644 (file)
@@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
                struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
                struct iwl_mac_beacon_cmd_v7 beacon_cmd;
        } u = {};
-       struct iwl_mac_beacon_cmd beacon_cmd;
+       struct iwl_mac_beacon_cmd beacon_cmd = {};
        struct ieee80211_tx_info *info;
        u32 beacon_skb_len;
        u32 rate, tx_flags;
index 4e74a6b90e70626d6e0e8bb8092796f078c7d55d..52f8d7a6a7dcec95d32089a11c7c9e37b53748a7 100644 (file)
@@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
  */
 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 {
+       u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
+               IWL_MVM_CMD_QUEUE;
+
        return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
-               ~BIT(IWL_MVM_CMD_QUEUE));
+               ~BIT(cmd_queue));
 }
 
 static inline
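A worked example of the corrected mask, with the queue constants assumed:

        /* Worked example (values assumed: num_of_queues = 8, DQA firmware,
         * IWL_MVM_DQA_CMD_QUEUE = 0):
         *   BIT(8) - 1 = 0xff        queues 0..7
         *   ~BIT(0)    = ...fffe     mask off the DQA command queue
         *   result     = 0xfe
         * Previously ~BIT(IWL_MVM_CMD_QUEUE) cleared the non-DQA queue
         * number even on DQA firmware, leaving the real command queue
         * marked flushable.
         */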
@@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
        if (!iwl_mvm_has_new_tx_api(mvm))
                iwl_free_fw_paging(mvm);
        mvm->ucode_loaded = false;
+       mvm->fw_dbg_conf = FW_DBG_INVALID;
        iwl_trans_stop_device(mvm->trans);
 }
 
index 9ffff6ed813386418800cf38906b91989c5b6f52..3da5ec40aaead90731224bce3686071ae9dd9344 100644 (file)
@@ -1149,21 +1149,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 
        mutex_lock(&mvm->mutex);
 
-       /* stop recording */
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               /* stop recording */
                iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+               iwl_mvm_fw_error_dump(mvm);
+
+               /* start recording again if the firmware is not crashed */
+               if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+                   mvm->fw->dbg_dest_tlv)
+                       iwl_clear_bits_prph(mvm->trans,
+                                           MON_BUFF_SAMPLE_CTL, 0x100);
        } else {
+               u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
+               u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
+
+               /* stop recording */
                iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
-               /* wait before we collect the data till the DBGC stop */
                udelay(100);
-       }
+               iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+               /* wait for the DBGC to stop before we collect the data */
+               udelay(500);
 
-       iwl_mvm_fw_error_dump(mvm);
+               iwl_mvm_fw_error_dump(mvm);
 
-       /* start recording again if the firmware is not crashed */
-       WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
-                    mvm->fw->dbg_dest_tlv &&
-                    iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
+               /* start recording again if the firmware is not crashed */
+               if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+                   mvm->fw->dbg_dest_tlv) {
+                       iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
+                       iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
+               }
+       }
 
        mutex_unlock(&mvm->mutex);
 
index 7788eefcd2bdd3066c0b40d5f5575e44f2102c0e..aa785cf3cf68399eb724b33d24d9168602f791a1 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
                rs_get_lower_rate_in_column(lq_sta, rate);
 }
 
-/* Check if both rates are identical
- * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
- * with a rate indicating STBC/BFER and ANT_AB.
- */
-static inline bool rs_rate_equal(struct rs_rate *a,
-                                struct rs_rate *b,
-                                bool allow_ant_mismatch)
-
-{
-       bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
-               (a->bfer == b->bfer);
-
-       if (allow_ant_mismatch) {
-               if (a->stbc || a->bfer) {
-                       WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
-                                 a->stbc, a->bfer, a->ant);
-                       ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
-               } else if (b->stbc || b->bfer) {
-                       WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
-                                 b->stbc, b->bfer, b->ant);
-                       ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
-               }
-       }
-
-       return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
-               (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
-}
-
 /* Check if both rates share the same column */
 static inline bool rs_rate_column_match(struct rs_rate *a,
                                        struct rs_rate *b)
@@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u32 lq_hwrate;
        struct rs_rate lq_rate, tx_resp_rate;
        struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
-       u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+       u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+       u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+       u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
        u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
-       bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
-                                            IWL_UCODE_TLV_API_LQ_SS_PARAMS);
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
 
        /* Here we actually compare this rate to the latest LQ command */
-       if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
+       if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
                IWL_DEBUG_RATE(mvm,
-                              "initial tx resp rate 0x%x does not match 0x%x\n",
-                              tx_resp_hwrate, lq_hwrate);
+                              "tx resp color 0x%x does not match 0x%x\n",
+                              lq_color, LQ_FLAG_COLOR_GET(table->flags));
 
                /*
                 * Since rates mis-match, the last LQ command may have failed.
@@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
        u8 valid_tx_ant = 0;
        struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
        bool toggle_ant = false;
+       u32 color;
 
        memcpy(&rate, initial_rate, sizeof(rate));
 
@@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
                                 num_rates, num_retries, valid_tx_ant,
                                 toggle_ant);
 
+       /* update the color of the LQ command (as a counter at bits 1-3) */
+       color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
+       lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
 }
 
 struct rs_bfer_active_iter_data {
index ee207f2c0a90c797e84659473f0bd1a455b00923..3abde1cb03034f9068230420072961371a187262 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -357,6 +358,20 @@ struct iwl_lq_sta {
        } pers;
 };
 
+/* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp.
+ * Note: this is the iwlmvm <-> mac80211 interface.
+ * bits 0-7: reduced tx power
+ * bits 8-10: LQ command's color
+ */
+#define RS_DRV_DATA_TXP_MSK 0xff
+#define RS_DRV_DATA_LQ_COLOR_POS 8
+#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
+                                     RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
+                                 (((uintptr_t)_p) |\
+                                  ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
+
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                          enum nl80211_band band, bool init);
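A worked round trip through the packing macros above, with the input values assumed:

        /* Worked example: reduced txp _p = 0x0a, lq color _c = 5
         *   RS_DRV_DATA_PACK(5, 0x0a) = (void *)(uintptr_t)(0x0a | (5 << 8))
         *                             = (void *)0x50a
         * and unpacking on the status path:
         *   0x50a & RS_DRV_DATA_TXP_MSK      = 0x0a  (reduced tx power)
         *   RS_DRV_DATA_LQ_COLOR_GET(0x50a)  = 5     (LQ command color)
         */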
index f5c786ddc52631087b56067807bb4d48e0869664..614d67810d051c539bd093b9452c64dc4e9c8a2f 100644 (file)
@@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (!iwl_mvm_is_dqa_supported(mvm))
                return 0;
 
-       if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+       if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+                   vif->type != NL80211_IFTYPE_ADHOC))
                return -ENOTSUPP;
 
        /*
@@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                mvmvif->cab_queue = queue;
        } else if (!fw_has_api(&mvm->fw->ucode_capa,
                               IWL_UCODE_TLV_API_STA_TYPE)) {
+               /*
+                * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+                * invalid, so make sure we use the queue we want.
+                * Note that this is done here as we want to avoid making DQA
+                * changes in mac80211 layer.
+                */
+               if (vif->type == NL80211_IFTYPE_ADHOC) {
+                       vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+                       mvmvif->cab_queue = vif->cab_queue;
+               }
                iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
                                   &cfg, timeout);
        }
@@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 
        /* Get the station from the mvm local station table */
        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
-       if (!mvm_sta) {
-               IWL_ERR(mvm, "Failed to find station\n");
-               return -EINVAL;
-       }
-       sta_id = mvm_sta->sta_id;
+       if (mvm_sta)
+               sta_id = mvm_sta->sta_id;
 
        IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
                      keyconf->keyidx, sta_id);
 
-       if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+       if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+                       keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+                       keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
                return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
        if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
index 2716cb5483bf5ab9851e7dbf1e5af04b887296e7..ad62b67dceb2836cefe354fd69af60aedc856f8e 100644 (file)
@@ -313,6 +313,7 @@ enum iwl_mvm_agg_state {
  *     This is basically (last acked packet++).
  * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
  *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in the tx response.
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
  * @txq_id: Tx queue used by the BA session / DQA
@@ -331,6 +332,7 @@ struct iwl_mvm_tid_data {
        u16 next_reclaimed;
        /* The rest is Tx AGG related */
        u32 rate_n_flags;
+       u8 lq_color;
        bool amsdu_in_ampdu_allowed;
        enum iwl_mvm_agg_state state;
        u16 txq_id;
index f9cbd197246f7ba6ba9e5e0af816cbec54eb9790..506d58104e1cc007ba9d767a16dd77f9df8f0481 100644 (file)
@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
        struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
        int ret;
 
-       if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
-               return -EIO;
-
        mutex_lock(&mvm->mutex);
 
+       if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+               ret = -EIO;
+               goto unlock;
+       }
+
        if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
                ret = -EINVAL;
                goto unlock;
index bcaceb64a6e8c230c5127ee5cc4b790b1f4f1d57..f21901cd4a4fdf75dac1b55d7b9525e84f9e65dd 100644 (file)
@@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        struct iwl_mvm_sta *mvmsta;
        struct sk_buff_head skbs;
        u8 skb_freed = 0;
+       u8 lq_color;
        u16 next_reclaimed, seq_ctl;
        bool is_ndp = false;
 
@@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                info->status.tx_time =
                        le16_to_cpu(tx_resp->wireless_media_time);
                BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+               lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
                info->status.status_driver_data[0] =
-                               (void *)(uintptr_t)tx_resp->reduced_tpc;
+                       RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
 
                ieee80211_tx_status(mvm->hw, skb);
        }
@@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
                        le32_to_cpu(tx_resp->initial_rate);
                mvmsta->tid_data[tid].tx_time =
                        le16_to_cpu(tx_resp->wireless_media_time);
+               mvmsta->tid_data[tid].lq_color =
+                       (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
+                       TX_RES_RATE_TABLE_COLOR_POS;
        }
 
        rcu_read_unlock();
@@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
        iwl_mvm_check_ratid_empty(mvm, sta, tid);
 
        freed = 0;
+
+       /* pack the lq color from tid_data along with the reduced txp */
+       ba_info->status.status_driver_data[0] =
+               RS_DRV_DATA_PACK(tid_data->lq_color,
+                                ba_info->status.status_driver_data[0]);
        ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
 
        skb_queue_walk(&reclaimed_skbs, skb) {
index 70acf850a9f19f9750296ae2019d8f4a8c277b67..93cbc7a69bcd55d3560529c88b6a127451606cd7 100644 (file)
@@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 {
-       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+           (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
                return iwl_pci_fw_enter_d0i3(trans);
 
        return 0;
@@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 
 static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 {
-       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+           (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
                iwl_pci_fw_exit_d0i3(trans);
 }
 #endif /* CONFIG_PM_SLEEP */
index 9fb46a6f47cf416e55d4416b228be50cdd460e4e..9c9bfbbabdf11ee597dfa7a3614e59d2e49236fe 100644 (file)
@@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
        if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
                ret = -EINVAL;
-               goto error;
+               goto error_free_resp;
        }
 
        rsp = (void *)hcmd.resp_pkt->data;
@@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
        if (qid > ARRAY_SIZE(trans_pcie->txq)) {
                WARN_ONCE(1, "queue index %d unsupported", qid);
                ret = -EIO;
-               goto error;
+               goto error_free_resp;
        }
 
        if (test_and_set_bit(qid, trans_pcie->queue_used)) {
                WARN_ONCE(1, "queue %d already used", qid);
                ret = -EIO;
-               goto error;
+               goto error_free_resp;
        }
 
        txq->id = qid;
@@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                           (txq->write_ptr) | (qid << 16));
        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
 
+       iwl_free_resp(&hcmd);
        return qid;
 
+error_free_resp:
+       iwl_free_resp(&hcmd);
 error:
        iwl_pcie_gen2_txq_free_memory(trans, txq);
        return ret;
index d5e0906262ead5dd9f202385d7ace81444c25a0f..a60926410438b98c2e414de081f7c8093bac5862 100644 (file)
@@ -925,6 +925,29 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
+static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
+               u16 bs)
+{
+       struct nvme_ns *ns = disk->private_data;
+       u16 old_ms = ns->ms;
+       u8 pi_type = 0;
+
+       ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+       ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
+
+       /* PI implementation requires metadata size equal to the
+        * t10 pi tuple size
+        */
+       if (ns->ms == sizeof(struct t10_pi_tuple))
+               pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+
+       if (blk_get_integrity(disk) &&
+           (ns->pi_type != pi_type || ns->ms != old_ms ||
+            bs != queue_logical_block_size(disk->queue) ||
+            (ns->ms && ns->ext)))
+               blk_integrity_unregister(disk);
+
+       ns->pi_type = pi_type;
+}
+
 static void nvme_init_integrity(struct nvme_ns *ns)
 {
        struct blk_integrity integrity;
@@ -951,6 +974,10 @@ static void nvme_init_integrity(struct nvme_ns *ns)
        blk_queue_max_integrity_segments(ns->queue, 1);
 }
 #else
+static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
+               u16 bs)
+{
+}
 static void nvme_init_integrity(struct nvme_ns *ns)
 {
 }
@@ -997,37 +1024,22 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 {
        struct nvme_ns *ns = disk->private_data;
-       u8 lbaf, pi_type;
-       u16 old_ms;
-       unsigned short bs;
-
-       old_ms = ns->ms;
-       lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
-       ns->lba_shift = id->lbaf[lbaf].ds;
-       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
-       ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
+       u16 bs;
 
        /*
         * If identify namespace failed, use default 512 byte block size so
         * block layer can use before failing read/write for 0 capacity.
         */
+       ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
        if (ns->lba_shift == 0)
                ns->lba_shift = 9;
        bs = 1 << ns->lba_shift;
-       /* XXX: PI implementation requires metadata equal t10 pi tuple size */
-       pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
-                                       id->dps & NVME_NS_DPS_PI_MASK : 0;
 
        blk_mq_freeze_queue(disk->queue);
-       if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
-                               ns->ms != old_ms ||
-                               bs != queue_logical_block_size(disk->queue) ||
-                               (ns->ms && ns->ext)))
-               blk_integrity_unregister(disk);
 
-       ns->pi_type = pi_type;
+       if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
+               nvme_prep_integrity(disk, id, bs);
        blk_queue_logical_block_size(ns->queue, bs);
-
        if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
                nvme_init_integrity(ns);
        if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
@@ -1605,7 +1617,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        }
        memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
 
-       if (ctrl->ops->is_fabrics) {
+       if (ctrl->ops->flags & NVME_F_FABRICS) {
                ctrl->icdoff = le16_to_cpu(id->icdoff);
                ctrl->ioccsz = le32_to_cpu(id->ioccsz);
                ctrl->iorcsz = le32_to_cpu(id->iorcsz);
@@ -2098,7 +2110,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                if (ns->ndev)
                        nvme_nvm_unregister_sysfs(ns);
                del_gendisk(ns->disk);
-               blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
        }
 
@@ -2436,8 +2447,16 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
                        continue;
                revalidate_disk(ns->disk);
                blk_set_queue_dying(ns->queue);
-               blk_mq_abort_requeue_list(ns->queue);
-               blk_mq_start_stopped_hw_queues(ns->queue, true);
+
+               /*
+                * Forcibly start all queues to avoid having stuck requests.
+                * Note that we must ensure the queues are not stopped
+                * when the final removal happens.
+                */
+               blk_mq_start_hw_queues(ns->queue);
+
+               /* drain requests in the requeue list */
+               blk_mq_kick_requeue_list(ns->queue);
        }
        mutex_unlock(&ctrl->namespaces_mutex);
 }
index dca7165fabcf9ce5df19fee1007e8da8bd794e21..5b14cbefb7240d5e7d50bb1ade8fd958417282e8 100644 (file)
@@ -45,8 +45,6 @@ enum nvme_fc_queue_flags {
 
 #define NVMEFC_QUEUE_DELAY     3               /* ms units */
 
-#define NVME_FC_MAX_CONNECT_ATTEMPTS   1
-
 struct nvme_fc_queue {
        struct nvme_fc_ctrl     *ctrl;
        struct device           *dev;
@@ -165,8 +163,6 @@ struct nvme_fc_ctrl {
        struct work_struct      delete_work;
        struct work_struct      reset_work;
        struct delayed_work     connect_work;
-       int                     reconnect_delay;
-       int                     connect_attempts;
 
        struct kref             ref;
        u32                     flags;
@@ -1376,9 +1372,9 @@ done:
        complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
        if (!complete_rq) {
                if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
-                       status = cpu_to_le16(NVME_SC_ABORT_REQ);
+                       status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
                        if (blk_queue_dying(rq->q))
-                               status |= cpu_to_le16(NVME_SC_DNR);
+                               status |= cpu_to_le16(NVME_SC_DNR << 1);
                }
                nvme_end_request(rq, status, result);
        } else
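The `<< 1` in this hunk is not cosmetic. A worked note, with the constants assumed from include/linux/nvme.h:

        /* Worked note (constants assumed: NVME_SC_ABORT_REQ = 0x7,
         * NVME_SC_DNR = 0x4000): the 16-bit completion status word keeps
         * the phase tag in bit 0 and the status code in bits 15:1, so
         * codes must be shifted up one bit before being or'ed in:
         *   wrong: cpu_to_le16(0x7)          -> lands on the phase bit
         *   fixed: cpu_to_le16(0x7 << 1)     =  0x000e
         *          cpu_to_le16(0x4000 << 1)  =  0x8000  (DNR in bit 15)
         */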
@@ -1751,7 +1747,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
        dev_warn(ctrl->ctrl.device,
                "NVME-FC{%d}: transport association error detected: %s\n",
                ctrl->cnum, errmsg);
-       dev_info(ctrl->ctrl.device,
+       dev_warn(ctrl->ctrl.device,
                "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
 
        /* stop the queues on error, cleanup is in reset thread */
@@ -2195,9 +2191,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
        if (!opts->nr_io_queues)
                return 0;
 
-       dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-                       opts->nr_io_queues);
-
        nvme_fc_init_io_queues(ctrl);
 
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
@@ -2268,9 +2261,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ctrl->queue_count == 1)
                return 0;
 
-       dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
-                       opts->nr_io_queues);
-
        nvme_fc_init_io_queues(ctrl);
 
        ret = blk_mq_reinit_tagset(&ctrl->tag_set);
@@ -2306,7 +2296,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        int ret;
        bool changed;
 
-       ctrl->connect_attempts++;
+       ++ctrl->ctrl.opts->nr_reconnects;
 
        /*
         * Create the admin queue
@@ -2403,9 +2393,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);
 
-       ctrl->connect_attempts = 0;
-
-       kref_get(&ctrl->ctrl.kref);
+       ctrl->ctrl.opts->nr_reconnects = 0;
 
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
@@ -2536,26 +2524,32 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
 
        /*
         * tear down the controller
-        * This will result in the last reference on the nvme ctrl to
-        * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback.
-        * From there, the transport will tear down it's logical queues and
-        * association.
+        * After the last reference on the nvme ctrl is removed,
+        * the transport nvme_fc_nvme_ctrl_freed() callback will be
+        * invoked. From there, the transport will tear down its
+        * logical queues and association.
         */
        nvme_uninit_ctrl(&ctrl->ctrl);
 
        nvme_put_ctrl(&ctrl->ctrl);
 }
 
-static int
-__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
+static bool
+__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
 {
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
-               return -EBUSY;
+               return true;
 
        if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
-               return -EBUSY;
+               return true;
 
-       return 0;
+       return false;
+}
+
+static int
+__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+       return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
 }
 
 /*
@@ -2580,6 +2574,35 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
        return ret;
 }
 
+static void
+nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
+{
+       /* If we are resetting/deleting then do nothing */
+       if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+               WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
+                       ctrl->ctrl.state == NVME_CTRL_LIVE);
+               return;
+       }
+
+       dev_info(ctrl->ctrl.device,
+               "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
+               ctrl->cnum, status);
+
+       if (nvmf_should_reconnect(&ctrl->ctrl)) {
+               dev_info(ctrl->ctrl.device,
+                       "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
+                       ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
+               queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
+                               ctrl->ctrl.opts->reconnect_delay * HZ);
+       } else {
+               dev_warn(ctrl->ctrl.device,
+                               "NVME-FC{%d}: Max reconnect attempts (%d) "
+                               "reached. Removing controller\n",
+                               ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
+               WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
+       }
+}
+
 static void
 nvme_fc_reset_ctrl_work(struct work_struct *work)
 {
@@ -2591,34 +2614,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
        nvme_fc_delete_association(ctrl);
 
        ret = nvme_fc_create_association(ctrl);
-       if (ret) {
-               dev_warn(ctrl->ctrl.device,
-                       "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
-                       ctrl->cnum, ret);
-               if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
-                       dev_warn(ctrl->ctrl.device,
-                               "NVME-FC{%d}: Max reconnect attempts (%d) "
-                               "reached. Removing controller\n",
-                               ctrl->cnum, ctrl->connect_attempts);
-
-                       if (!nvme_change_ctrl_state(&ctrl->ctrl,
-                               NVME_CTRL_DELETING)) {
-                               dev_err(ctrl->ctrl.device,
-                                       "NVME-FC{%d}: failed to change state "
-                                       "to DELETING\n", ctrl->cnum);
-                               return;
-                       }
-
-                       WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
-                       return;
-               }
-
-               dev_warn(ctrl->ctrl.device,
-                       "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
-                       ctrl->cnum, ctrl->reconnect_delay);
-               queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
-                               ctrl->reconnect_delay * HZ);
-       } else
+       if (ret)
+               nvme_fc_reconnect_or_delete(ctrl, ret);
+       else
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
 }
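With the retry logic consolidated into nvme_fc_reconnect_or_delete(), the attempt budget now comes from the fabrics options instead of a hard-coded single attempt. A worked example with assumed option values:

        /* Worked example (option values assumed): reconnect_delay = 10s
         * and ctrl_loss_tmo = 600s give the fabrics layer 600 / 10 = 60
         * attempts. Each failed nvme_fc_create_association() bumps
         * nr_reconnects and requeues connect_work 10s out; once
         * nvmf_should_reconnect() says no, the controller is scheduled
         * for deletion instead.
         */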
@@ -2632,7 +2630,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
-       dev_warn(ctrl->ctrl.device,
+       dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
@@ -2649,7 +2647,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
        .name                   = "fc",
        .module                 = THIS_MODULE,
-       .is_fabrics             = true,
+       .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
@@ -2671,34 +2669,9 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
                                struct nvme_fc_ctrl, connect_work);
 
        ret = nvme_fc_create_association(ctrl);
-       if (ret) {
-               dev_warn(ctrl->ctrl.device,
-                       "NVME-FC{%d}: Reconnect attempt failed (%d)\n",
-                       ctrl->cnum, ret);
-               if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
-                       dev_warn(ctrl->ctrl.device,
-                               "NVME-FC{%d}: Max reconnect attempts (%d) "
-                               "reached. Removing controller\n",
-                               ctrl->cnum, ctrl->connect_attempts);
-
-                       if (!nvme_change_ctrl_state(&ctrl->ctrl,
-                               NVME_CTRL_DELETING)) {
-                               dev_err(ctrl->ctrl.device,
-                                       "NVME-FC{%d}: failed to change state "
-                                       "to DELETING\n", ctrl->cnum);
-                               return;
-                       }
-
-                       WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
-                       return;
-               }
-
-               dev_warn(ctrl->ctrl.device,
-                       "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
-                       ctrl->cnum, ctrl->reconnect_delay);
-               queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
-                               ctrl->reconnect_delay * HZ);
-       } else
+       if (ret)
+               nvme_fc_reconnect_or_delete(ctrl, ret);
+       else
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: controller reconnect complete\n",
                        ctrl->cnum);
@@ -2755,7 +2728,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
        INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
-       ctrl->reconnect_delay = opts->reconnect_delay;
        spin_lock_init(&ctrl->lock);
 
        /* io queue count */
@@ -2819,7 +2791,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                ctrl->ctrl.opts = NULL;
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
-               nvme_put_ctrl(&ctrl->ctrl);
 
                /* as we're past the point where we transition to the ref
                 * counting teardown path, if we return a bad pointer here,
@@ -2835,6 +2806,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                return ERR_PTR(ret);
        }
 
+       kref_get(&ctrl->ctrl.kref);
+
        dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
                ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
@@ -2971,7 +2944,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 static struct nvmf_transport_ops nvme_fc_transport = {
        .name           = "fc",
        .required_opts  = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
-       .allowed_opts   = NVMF_OPT_RECONNECT_DELAY,
+       .allowed_opts   = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
        .create_ctrl    = nvme_fc_create_ctrl,
 };
 
index 29c708ca9621c4622ab3f32d153c02546e9d0ca6..9d6a070d43914dcc5388b2a7a03f477a00b8bd1f 100644 (file)
@@ -208,7 +208,9 @@ struct nvme_ns {
 struct nvme_ctrl_ops {
        const char *name;
        struct module *module;
-       bool is_fabrics;
+       unsigned int flags;
+#define NVME_F_FABRICS                 (1 << 0)
+#define NVME_F_METADATA_SUPPORTED      (1 << 1)
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
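Converting the bool to a flags word keeps nvme_ctrl_ops extensible; capability checks become mask tests. A minimal sketch using the defines above:

        /* Minimal sketch: capability tests against the new flags field. */
        static bool ctrl_is_fabrics_sketch(const struct nvme_ctrl_ops *ops)
        {
                return ops->flags & NVME_F_FABRICS;
        }

        static bool ctrl_has_metadata_sketch(const struct nvme_ctrl_ops *ops)
        {
                return ops->flags & NVME_F_METADATA_SUPPORTED;
        }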
index 4c2ff2bb26bcd7c615e40ae777327a3ba401cf3d..d52701df72457d0fa2b85a168c500fd022b8b717 100644 (file)
@@ -263,7 +263,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
        c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
 
        if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
-               dev_warn(dev->dev, "unable to set dbbuf\n");
+               dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
                /* Free memory and continue on */
                nvme_dbbuf_dma_free(dev);
        }
@@ -1394,11 +1394,11 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
        result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
                                      &pci_status);
        if (result == PCIBIOS_SUCCESSFUL)
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                         "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
                         csts, pci_status);
        else
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                         "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
                         csts, result);
 }
@@ -1740,8 +1740,8 @@ static int nvme_pci_enable(struct nvme_dev *dev)
         */
        if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
                dev->q_depth = 2;
-               dev_warn(dev->dev, "detected Apple NVMe controller, set "
-                       "queue depth=%u to work around controller resets\n",
+               dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
+                       "set queue depth=%u to work around controller resets\n",
                        dev->q_depth);
        }
 
@@ -1759,7 +1759,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
                if (dev->cmbsz) {
                        if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
                                                    &dev_attr_cmb.attr, NULL))
-                               dev_warn(dev->dev,
+                               dev_warn(dev->ctrl.device,
                                         "failed to add sysfs attribute for CMB\n");
                }
        }
@@ -2047,6 +2047,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .name                   = "pcie",
        .module                 = THIS_MODULE,
+       .flags                  = NVME_F_METADATA_SUPPORTED,
        .reg_read32             = nvme_pci_reg_read32,
        .reg_write32            = nvme_pci_reg_write32,
        .reg_read64             = nvme_pci_reg_read64,
@@ -2293,6 +2294,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_VDEVICE(INTEL, 0x0a54),
                .driver_data = NVME_QUIRK_STRIPE_SIZE |
                                NVME_QUIRK_DEALLOCATE_ZEROES, },
+       { PCI_VDEVICE(INTEL, 0xf1a5),   /* Intel 600P/P3100 */
+               .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
        { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
index dd1c6deef82fcf509b5c04af20d9df1c17746d9b..28bd255c144dcca10aa60cede2c9a51cd101426a 100644 (file)
@@ -1038,6 +1038,19 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
                nvme_rdma_wr_error(cq, wc, "SEND");
 }
 
+static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
+{
+       int sig_limit;
+
+       /*
+        * We signal completion every queue depth/2 and also handle the
+        * degenerate case of a device with queue_depth=1, where we
+        * need to signal every message.
+        */
+       sig_limit = max(queue->queue_size / 2, 1);
+       return (++queue->sig_count % sig_limit) == 0;
+}
+
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
                struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
                struct ib_send_wr *first, bool flush)
@@ -1065,9 +1078,6 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
         * Would have been way too obvious to handle this in hardware or
         * at least the RDMA stack..
         *
-        * This messy and racy code sniplet is copy and pasted from the iSER
-        * initiator, and the magic '32' comes from there as well.
-        *
         * Always signal the flushes. The magic request used for the flush
         * sequencer is not allocated in our driver's tagset and it's
         * triggered to be freed by blk_cleanup_queue(). So we need to
@@ -1075,7 +1085,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
         * embedded in request's payload, is not freed when __ib_process_cq()
         * calls wr_cqe->done().
         */
-       if ((++queue->sig_count % 32) == 0 || flush)
+       if (nvme_rdma_queue_sig_limit(queue) || flush)
                wr.send_flags |= IB_SEND_SIGNALED;
 
        if (first)
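
The helper above replaces the hard-coded "% 32" with a rate derived from the actual queue depth, so roughly two completions per queue's worth of sends get signaled regardless of how the queue is sized. A minimal sketch of the cadence, using only the semantics visible in this hunk (not driver code):

    /* sig_every(): nonzero when the Nth send should be signaled. */
    static int sig_every(int queue_size, int sig_count)
    {
            int sig_limit = queue_size / 2 > 1 ? queue_size / 2 : 1;

            /* queue_size = 128 -> every 64th send is signaled;
             * queue_size = 1   -> sig_limit = 1 and every send is
             * signaled, the degenerate case the comment calls out.
             */
            return (sig_count % sig_limit) == 0;
    }
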
@@ -1782,7 +1792,7 @@ static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
        .name                   = "rdma",
        .module                 = THIS_MODULE,
-       .is_fabrics             = true,
+       .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
index feb497134aeea662092424804073be9ed104c464..e503cfff03372fb9cc7605c800743dd4c5891318 100644 (file)
@@ -558,7 +558,7 @@ static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
 static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name                   = "loop",
        .module                 = THIS_MODULE,
-       .is_fabrics             = true,
+       .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
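
Across the PCI, RDMA and loop transports the single-purpose is_fabrics bool becomes a flags word, which is what lets the PCI hunk above advertise NVME_F_METADATA_SUPPORTED through the same field. A sketch of the pattern; the bit positions here are illustrative assumptions, not copied from the tree:

    /* Sketch: controller capabilities packed into one flags word. */
    enum {
            NVME_F_FABRICS            = 1 << 0,   /* assumed bit */
            NVME_F_METADATA_SUPPORTED = 1 << 1,   /* assumed bit */
    };

    struct ctrl_ops_sketch {
            const char   *name;
            unsigned int  flags;
    };

    static int ctrl_is_fabrics(const struct ctrl_ops_sketch *ops)
    {
            return !!(ops->flags & NVME_F_FABRICS);
    }
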
index 71fecc2debfc940affba38afc6c912f3a4aa8b4b..703a42118ffc907571f2da5300639b725c2d529f 100644 (file)
@@ -523,7 +523,7 @@ static int __init of_platform_default_populate_init(void)
 arch_initcall_sync(of_platform_default_populate_init);
 #endif
 
-static int of_platform_device_destroy(struct device *dev, void *data)
+int of_platform_device_destroy(struct device *dev, void *data)
 {
        /* Do not touch devices not populated from the device tree */
        if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED))
@@ -544,6 +544,7 @@ static int of_platform_device_destroy(struct device *dev, void *data)
        of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
        return 0;
 }
+EXPORT_SYMBOL_GPL(of_platform_device_destroy);
 
 /**
  * of_platform_depopulate() - Remove devices populated from device tree
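
Exporting of_platform_device_destroy() lets a driver tear down individual OF-populated children instead of depopulating an entire bus; of_platform_depopulate() itself is essentially a per-child walk with this callback. A hedged sketch of how a caller might use the now-public symbol (the parent device is a placeholder):

    /* Sketch: destroy every OF-populated child under one parent. */
    static void teardown_of_children(struct device *parent)
    {
            /* of_platform_device_destroy() ignores devices that were
             * not populated from the device tree, so a blanket walk
             * over all children is safe.
             */
            device_for_each_child(parent, NULL, of_platform_device_destroy);
    }
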
index a98cba55c7f02e670436787c011a85a8772f5109..19a289b8cc944fefb6e2bb3b627b61a0a2eff3ed 100644 (file)
@@ -252,7 +252,34 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
 static int imx6q_pcie_abort_handler(unsigned long addr,
                unsigned int fsr, struct pt_regs *regs)
 {
-       return 0;
+       unsigned long pc = instruction_pointer(regs);
+       unsigned long instr = *(unsigned long *)pc;
+       int reg = (instr >> 12) & 15;
+
+       /*
+        * If the instruction being executed was a read,
+        * make it look like it read all-ones.
+        */
+       if ((instr & 0x0c100000) == 0x04100000) {
+               unsigned long val;
+
+               if (instr & 0x00400000)
+                       val = 255;
+               else
+                       val = -1;
+
+               regs->uregs[reg] = val;
+               regs->ARM_pc += 4;
+               return 0;
+       }
+
+       if ((instr & 0x0e100090) == 0x00100090) {
+               regs->uregs[reg] = -1;
+               regs->ARM_pc += 4;
+               return 0;
+       }
+
+       return 1;
 }
 
 static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
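
The rewritten handler only fixes up loads, and the two mask/value pairs carry the decode. A commented restatement, offered as a sketch of the ARM (non-Thumb) encodings involved rather than authoritative architecture text:

    /*
     * (instr & 0x0c100000) == 0x04100000
     *     bits 27:26 == 01 (single data transfer) with bit 20 (L) set,
     *     i.e. LDR/LDRB; bit 22 (B) selects byte width, which is why a
     *     byte load is faked as 255 and a word load as all-ones (-1).
     *
     * (instr & 0x0e100090) == 0x00100090
     *     the "extra" load encodings (LDRH/LDRSB/LDRSH): bits 27:25
     *     zero, bit 20 (L) set, and the fixed bits in positions 7 and 4.
     *
     * Anything else returns 1 so the abort is handled as a real fault.
     */
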
@@ -819,8 +846,8 @@ static int __init imx6_pcie_init(void)
         * we can install the handler here without risking it
         * accessing some uninitialized driver state.
         */
-       hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
-                       "imprecise external abort");
+       hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
+                       "external abort on non-linefetch");
 
        return platform_driver_register(&imx6_pcie_driver);
 }
index c23f146fb5a668b3a5f5ae27b0bb4f00a8057d7f..c09623ca8c3b14b9b522c471fa9ac118c9522106 100644 (file)
@@ -6,6 +6,7 @@ menu "PCI Endpoint"
 
 config PCI_ENDPOINT
        bool "PCI Endpoint Support"
+       depends on HAS_DMA
        help
           Enable this configuration option to support configurable PCI
           endpoint. This should be enabled if the platform has a PCI
index b01bd5bba8e604709c3b51f14f9fbdb796a2c4fb..563901cd9c06b839f4eea9a74ed78f76309f7f62 100644 (file)
@@ -2144,7 +2144,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
 
        if (!pm_runtime_suspended(dev)
            || pci_target_state(pci_dev) != pci_dev->current_state
-           || platform_pci_need_resume(pci_dev))
+           || platform_pci_need_resume(pci_dev)
+           || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
                return false;
 
        /*
index cc6e085008fb9c6e72244d9e6a588864c3f2962b..f6a63406c76e0b170e348c882abcf2723f2d401d 100644 (file)
@@ -1291,7 +1291,6 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
        cdev = &stdev->cdev;
        cdev_init(cdev, &switchtec_fops);
        cdev->owner = THIS_MODULE;
-       cdev->kobj.parent = &dev->kobj;
 
        return stdev;
 
@@ -1442,12 +1441,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
        stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
        stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
        stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
-       stdev->partition = ioread8(&stdev->mmio_ntb->partition_id);
+       stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
        stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
        stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
        stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
        stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
 
+       if (stdev->partition_count < 1)
+               stdev->partition_count = 1;
+
        init_pff(stdev);
 
        pci_set_drvdata(pdev, stdev);
@@ -1479,11 +1481,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
                  SWITCHTEC_EVENT_EN_IRQ,
                  &stdev->mmio_part_cfg->mrpc_comp_hdr);
 
-       rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1);
-       if (rc)
-               goto err_put;
-
-       rc = device_add(&stdev->dev);
+       rc = cdev_device_add(&stdev->cdev, &stdev->dev);
        if (rc)
                goto err_devadd;
 
@@ -1492,7 +1490,6 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
        return 0;
 
 err_devadd:
-       cdev_del(&stdev->cdev);
        stdev_kill(stdev);
 err_put:
        ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
@@ -1506,8 +1503,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
 
        pci_set_drvdata(pdev, NULL);
 
-       device_del(&stdev->dev);
-       cdev_del(&stdev->cdev);
+       cdev_device_del(&stdev->cdev, &stdev->dev);
        ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
        dev_info(&stdev->dev, "unregistered.\n");
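
The cdev_device_add()/cdev_device_del() conversion also explains the kobj.parent line removed earlier in this file: the combined helper links the cdev to the struct device's kobject and registers both in the correct order, leaving a single-step error path. A minimal sketch of the pairing, with placeholder names:

    /* Sketch: char device and struct device registered as one unit. */
    static int my_register(struct my_dev *md)      /* hypothetical type */
    {
            cdev_init(&md->cdev, &my_fops);
            md->cdev.owner = THIS_MODULE;

            /* Sets the cdev's parent kobject from md->dev and adds
             * both; on failure nothing is left half-registered.
             */
            return cdev_device_add(&md->cdev, &md->dev);
    }

    static void my_unregister(struct my_dev *md)
    {
            cdev_device_del(&md->cdev, &md->dev);
    }
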
 
index 34c862f213c7e85554830b741cdf051f1fbbd86d..0a9b78705ee810c9e18c6fa1f46551fc27374287 100644 (file)
@@ -29,6 +29,17 @@ static int arm_pmu_acpi_register_irq(int cpu)
                return -EINVAL;
 
        gsi = gicc->performance_interrupt;
+
+       /*
+        * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
+        * have an interrupt. QEMU advertises this by using a GSI of zero,
+        * which is not known to be valid on any hardware despite being
+        * valid per the spec. Take the pragmatic approach and reject a
+        * GSI of zero for now.
+        */
+       if (!gsi)
+               return 0;
+
        if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
                trigger = ACPI_EDGE_SENSITIVE;
        else
index 1653cbda6a8299b33b5cebae92bd4710e41412a4..bd459a93b0e7e9b11c999dd4bf9b95c3500be3e2 100644 (file)
@@ -680,30 +680,16 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group);
  * pinctrl_generic_free_groups() - removes all pin groups
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl groups
+ * are allocated with devm_kzalloc() so no need to free them here.
  */
 static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
 {
        struct radix_tree_iter iter;
-       struct group_desc *group;
-       unsigned long *indices;
        void **slot;
-       int i = 0;
-
-       indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-                              pctldev->num_groups, GFP_KERNEL);
-       if (!indices)
-               return;
 
        radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
-               indices[i++] = iter.index;
-
-       for (i = 0; i < pctldev->num_groups; i++) {
-               group = radix_tree_lookup(&pctldev->pin_group_tree,
-                                         indices[i]);
-               radix_tree_delete(&pctldev->pin_group_tree, indices[i]);
-               devm_kfree(pctldev->dev, group);
-       }
+               radix_tree_delete(&pctldev->pin_group_tree, iter.index);
 
        pctldev->num_groups = 0;
 }
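
Because each group descriptor is devm-allocated, teardown no longer needs the temporary index array: clearing the radix tree slot by slot is sufficient, and the memory is released with the device. A small sketch of the drain idiom, assuming (as the patch does) that deleting the current entry inside radix_tree_for_each_slot() is safe:

    /* Sketch: drain a radix tree whose payloads are devm-managed. */
    static void drain_tree(struct radix_tree_root *root)
    {
            struct radix_tree_iter iter;
            void **slot;

            radix_tree_for_each_slot(slot, root, &iter, 0)
                    radix_tree_delete(root, iter.index);
            /* payloads are freed by devm later; nothing to kfree here */
    }
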
index 41b5b07d5a2bf51f6b0623597c294862910de78c..6852010a6d708b5010555cbb141bf57ac31de077 100644 (file)
@@ -194,6 +194,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
        return 0;
 }
 
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp &= ~(mask << shift);
+       tmp |= value << shift;
+       writel(tmp, reg);
+}
+
 static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
                               unsigned group)
 {
@@ -211,8 +221,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
                reg += bank * 0x20 + pin / 16 * 0x10;
                shift = pin % 16 * 2;
 
-               writel(0x3 << shift, reg + CLR);
-               writel(g->muxsel[i] << shift, reg + SET);
+               mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
        }
 
        return 0;
@@ -279,8 +288,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
                        /* mA */
                        if (config & MA_PRESENT) {
                                shift = pin % 8 * 4;
-                               writel(0x3 << shift, reg + CLR);
-                               writel(ma << shift, reg + SET);
+                               mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
                        }
 
                        /* vol */
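
The MXS pin blocks expose SET/CLR shadow registers, but updating a two-bit field with a CLR write followed by a SET write leaves a window in which the field reads as zero; mxs_pinctrl_rmwl() closes it with an ordinary read-modify-write of the base register. A worked example with the values used above, assuming pin % 16 == 5 so shift == 10:

    u32 tmp = readl(reg);    /* say tmp == 0xffffffff               */
    tmp &= ~(0x3u << 10);    /* tmp == 0xfffff3ff, field cleared    */
    tmp |= 2u << 10;         /* tmp == 0xfffffbff, muxsel 2 written */
    writel(tmp, reg);
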
index 2debba62fac90d956ce37cd09805c518ee4a8da5..20f1b44939944614ff270c757fc7152f901e9f09 100644 (file)
@@ -1539,15 +1539,29 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
  * is not listed below.
  */
 static const struct dmi_system_id chv_no_valid_mask[] = {
+       /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
        {
-               /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
-               .ident = "Acer Chromebook (CYAN)",
+               .ident = "Intel_Strago based Chromebooks (All models)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
-                       DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
                },
-       }
+       },
+       {
+               .ident = "Acer Chromebook R11 (Cyan)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
+               },
+       },
+       {
+               .ident = "Samsung Chromebook 3 (Celes)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+               },
+       },
+       {}
 };
 
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
index 0d6b7f4b82af34a2a85e51c924ca8420bd7a6268..720a19fd38d2c6c24d30e5e1c7dcdf70cae0e590 100644 (file)
@@ -35,7 +35,6 @@ static const struct pin_config_item conf_items[] = {
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
                                "input bias pull to pin specific state", NULL, false),
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
-       PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
@@ -161,7 +160,6 @@ static const struct pinconf_generic_params dt_params[] = {
        { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 },
        { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 },
        { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
-       { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 },
        { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 },
        { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 },
        { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 },
@@ -174,7 +172,6 @@ static const struct pinconf_generic_params dt_params[] = {
        { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
        { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
        { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
-       { "output-enable", PIN_CONFIG_OUTPUT, 1, },
        { "output-high", PIN_CONFIG_OUTPUT, 1, },
        { "output-low", PIN_CONFIG_OUTPUT, 0, },
        { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
index 9fd6d9087dc508ca7731d7f1e868988e0e320cc2..16b3ae5e4f440c4769db55ebf7c61ebae7e1e5c1 100644 (file)
@@ -826,30 +826,17 @@ EXPORT_SYMBOL_GPL(pinmux_generic_remove_function);
  * pinmux_generic_free_functions() - removes all functions
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl
+ * functions are allocated with devm_kzalloc() so no need to free
+ * them here.
  */
 void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
 {
        struct radix_tree_iter iter;
-       struct function_desc *function;
-       unsigned long *indices;
        void **slot;
-       int i = 0;
-
-       indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-                              pctldev->num_functions, GFP_KERNEL);
-       if (!indices)
-               return;
 
        radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
-               indices[i++] = iter.index;
-
-       for (i = 0; i < pctldev->num_functions; i++) {
-               function = radix_tree_lookup(&pctldev->pin_function_tree,
-                                            indices[i]);
-               radix_tree_delete(&pctldev->pin_function_tree, indices[i]);
-               devm_kfree(pctldev->dev, function);
-       }
+               radix_tree_delete(&pctldev->pin_function_tree, iter.index);
 
        pctldev->num_functions = 0;
 }
index 9aec1d2232dd830e2c19a8e1e394e6033e621c21..6624499eae72f5c2ba986c8c54c6f7e583f05f2a 100644 (file)
@@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x3, "owa")),          /* DOUT */
+                 SUNXI_FUNCTION(0x3, "spdif")),        /* DOUT */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out")),
index 14bde0db8c245680fd010bf2f0aa39093af5a4c2..5b10b50f8686f953a5fe476f3131b0d8a42307bb 100644 (file)
@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone(
 
        power_zone->id = result;
        idr_init(&power_zone->idr);
+       result = -ENOMEM;
        power_zone->name = kstrdup(name, GFP_KERNEL);
        if (!power_zone->name)
                goto err_name_alloc;
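
Pre-loading result with -ENOMEM matters because the shared error labels return result: without it, a failed kstrdup() would propagate the stale (non-negative) id obtained just above. The idiom in miniature, with hypothetical helpers:

    /* Sketch: set the error code before taking the shared exit path. */
    result = alloc_id(zone);            /* returns >= 0 on success */
    if (result < 0)
            return result;
    zone->id = result;

    result = -ENOMEM;                   /* covers the allocation below */
    zone->name = kstrdup(name, GFP_KERNEL);
    if (!zone->name)
            goto err_name_alloc;        /* exits with -ENOMEM, not the id */
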
index 35ce53edabf90009efcd228e71a109e410337708..d5e5229308f2291136ebe7c6061346507a733907 100644 (file)
@@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void)
 }
 
 postcore_initcall(hi6220_reset_init);
+
+MODULE_LICENSE("GPL v2");
index b3de973a62607de6812615e81ac1d00bce5c1f1a..9dca53df35845cc64366a5428d9927bff7f28220 100644 (file)
@@ -1088,7 +1088,7 @@ static u32 rtc_handler(void *context)
        }
        spin_unlock_irqrestore(&rtc_lock, flags);
 
-       pm_wakeup_event(dev, 0);
+       pm_wakeup_hard_event(dev);
        acpi_clear_event(ACPI_EVENT_RTC);
        acpi_disable_event(ACPI_EVENT_RTC, 0);
        return ACPI_INTERRUPT_HANDLED;
index 622bdabc88941430f18ed65b0d70fd9fb0478b9b..dab195f04da78f46921f4eaf044c3f7e953c9a65 100644 (file)
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
                goto bye;
        }
 
-       mempool_free(mbp, hw->mb_mempool);
        if (finicsum != cfcsum) {
                csio_warn(hw,
                      "Config File checksum mismatch: csum=%#x, computed=%#x\n",
@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
        rv = csio_hw_validate_caps(hw, mbp);
        if (rv != 0)
                goto bye;
+
+       mempool_free(mbp, hw->mb_mempool);
+       mbp = NULL;
+
        /*
         * Note that we're operating with parameters
         * not supplied by the driver, rather than from hard-wired
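
This csio hunk is a use-after-free fix: the mailbox buffer was returned to the mempool before csio_hw_validate_caps() read it. The free now moves after the last use, and mbp is NULLed so the function's common exit path cannot free it a second time. The ordering rule in miniature, with placeholder names:

    rv = validate(buf);          /* last reader of buf */
    if (rv)
            goto bye;            /* error path may free buf once */

    mempool_free(buf, pool);     /* free only after the last use */
    buf = NULL;                  /* make any later free a no-op */
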
index bd7d39ecbd2470246a58a8ed3a3fd11367df814f..fb06974c88c15c2b23864e44779e7d61826546bf 100644 (file)
@@ -1873,6 +1873,11 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
        tcp_task->dd_data = tdata;
        task->hdr = NULL;
 
+       if (tdata->skb) {
+               kfree_skb(tdata->skb);
+               tdata->skb = NULL;
+       }
+
        if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
            (opcode == ISCSI_OP_SCSI_DATA_OUT ||
             (opcode == ISCSI_OP_SCSI_CMD &&
@@ -1890,6 +1895,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
                return -ENOMEM;
        }
 
+       skb_get(tdata->skb);
        skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
        task->hdr = (struct iscsi_hdr *)tdata->skb->data;
        task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
@@ -2035,9 +2041,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
        unsigned int datalen;
        int err;
 
-       if (!skb) {
+       if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) {
                log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
-                       "task 0x%p, skb NULL.\n", task);
+                       "task 0x%p, skb 0x%p\n", task, skb);
                return 0;
        }
 
@@ -2050,7 +2056,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
        }
 
        datalen = skb->data_len;
-       tdata->skb = NULL;
 
        /* write ppod first if using ofldq to write ppod */
        if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
@@ -2078,6 +2083,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
                        pdulen += ISCSI_DIGEST_SIZE;
 
                task->conn->txdata_octets += pdulen;
+               cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
                return 0;
        }
 
@@ -2086,7 +2092,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
                        "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
                        task, skb, skb->len, skb->data_len, err);
                /* reset skb to send when we are called again */
-               tdata->skb = skb;
                return err;
        }
 
@@ -2094,7 +2099,8 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
                "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
                task->itt, skb, skb->len, skb->data_len, err);
 
-       kfree_skb(skb);
+       __kfree_skb(tdata->skb);
+       tdata->skb = NULL;
 
        iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
        iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
@@ -2113,8 +2119,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
 
        tcp_task->dd_data = NULL;
        /*  never reached the xmit task callout */
-       if (tdata->skb)
-               __kfree_skb(tdata->skb);
+       if (tdata->skb) {
+               kfree_skb(tdata->skb);
+               tdata->skb = NULL;
+       }
 
        task_release_itt(task, task->hdr_itt);
        memset(tdata, 0, sizeof(*tdata));
@@ -2714,6 +2722,9 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
 static int __init libcxgbi_init_module(void)
 {
        pr_info("%s", version);
+
+       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+                    sizeof(struct cxgbi_skb_cb));
        return 0;
 }
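
Taken together, the cxgbi changes move PDU skb ownership to plain reference counting: skb_get() takes the extra reference when the PDU is allocated (replacing the atomic_set(&skb->users, 2) trick removed in the header below), SKCBF_TX_DONE marks a transmitted-but-unreleased PDU so the xmit path is idempotent, and the BUILD_BUG_ON proves the private cb struct still fits inside skb->cb. A sketch of that compile-time check pattern:

    /* Sketch: fail the build if a private cb struct outgrows skb->cb. */
    struct my_skb_cb {                  /* hypothetical private state */
            unsigned long flags;
            unsigned int  seq;
    };

    static int __init my_init(void)
    {
            BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
                         sizeof(struct my_skb_cb));
            return 0;
    }
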
 
index 18e0ea83d36172cf2fcc55ecfe8f3b133913f1d5..239462a7576051dca167ad246ba34f07777209cf 100644 (file)
@@ -195,7 +195,8 @@ struct cxgbi_skb_rx_cb {
 };
 
 struct cxgbi_skb_tx_cb {
-       void *l2t;
+       void *handle;
+       void *arp_err_handler;
        struct sk_buff *wr_next;
 };
 
@@ -203,6 +204,7 @@ enum cxgbi_skcb_flags {
        SKCBF_TX_NEED_HDR,      /* packet needs a header */
        SKCBF_TX_MEM_WRITE,     /* memory write */
        SKCBF_TX_FLAG_COMPL,    /* wr completion flag */
+       SKCBF_TX_DONE,          /* skb tx done */
        SKCBF_RX_COALESCED,     /* received whole pdu */
        SKCBF_RX_HDR,           /* received pdu header */
        SKCBF_RX_DATA,          /* received pdu payload */
@@ -215,13 +217,13 @@ enum cxgbi_skcb_flags {
 };
 
 struct cxgbi_skb_cb {
-       unsigned char ulp_mode;
-       unsigned long flags;
-       unsigned int seq;
        union {
                struct cxgbi_skb_rx_cb rx;
                struct cxgbi_skb_tx_cb tx;
        };
+       unsigned char ulp_mode;
+       unsigned long flags;
+       unsigned int seq;
 };
 
 #define CXGBI_SKB_CB(skb)      ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
@@ -374,11 +376,9 @@ static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
        cxgbi_skcb_tx_wr_next(skb) = NULL;
        /*
         * We want to take an extra reference since both us and the driver
-        * need to free the packet before it's really freed. We know there's
-        * just one user currently so we use atomic_set rather than skb_get
-        * to avoid the atomic op.
+        * need to free the packet before it's really freed.
         */
-       atomic_set(&skb->users, 2);
+       skb_get(skb);
 
        if (!csk->wr_pending_head)
                csk->wr_pending_head = skb;
index 3cbab8710e58133ead0cb173cb60ea7a4cafc8e2..2ceff585f1896d9762fd288a93f4bcfde0bf2623 100644 (file)
@@ -265,18 +265,16 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
                                      struct list_head *list,
                                      unsigned char *cdb)
 {
-       struct scsi_device *sdev = ctlr->ms_sdev;
-       struct rdac_dh_data *h = sdev->handler_data;
        struct rdac_mode_common *common;
        unsigned data_size;
        struct rdac_queue_data *qdata;
        u8 *lun_table;
 
-       if (h->ctlr->use_ms10) {
+       if (ctlr->use_ms10) {
                struct rdac_pg_expanded *rdac_pg;
 
                data_size = sizeof(struct rdac_pg_expanded);
-               rdac_pg = &h->ctlr->mode_select.expanded;
+               rdac_pg = &ctlr->mode_select.expanded;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
@@ -288,7 +286,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
                struct rdac_pg_legacy *rdac_pg;
 
                data_size = sizeof(struct rdac_pg_legacy);
-               rdac_pg = &h->ctlr->mode_select.legacy;
+               rdac_pg = &ctlr->mode_select.legacy;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
@@ -304,7 +302,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
        }
 
        /* Prepare the command. */
-       if (h->ctlr->use_ms10) {
+       if (ctlr->use_ms10) {
                cdb[0] = MODE_SELECT_10;
                cdb[7] = data_size >> 8;
                cdb[8] = data_size & 0xff;
index d390325c99ecf9487c9b4441fd0e25aec05378c7..abf6026645dd2308fba55179ca750b0e786da5fa 100644 (file)
@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
                cmd = list_first_entry_or_null(&vscsi->free_cmd,
                                               struct ibmvscsis_cmd, list);
                if (cmd) {
+                       if (cmd->abort_cmd)
+                               cmd->abort_cmd = NULL;
                        cmd->flags &= ~(DELAY_SEND);
                        list_del(&cmd->list);
                        cmd->iue = iue;
@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
                                if (cmd->abort_cmd) {
                                        retry = true;
                                        cmd->abort_cmd->flags &= ~(DELAY_SEND);
+                                       cmd->abort_cmd = NULL;
                                }
 
                                /*
@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
                                        list_del(&cmd->list);
                                        ibmvscsis_free_cmd_resources(vscsi,
                                                                     cmd);
+                                       /*
+                                        * With a successfully aborted op
+                                        * through LIO we want to increment the
+                                        * vscsi credit so that when we don't
+                                        * send a rsp to the original scsi abort
+                                        * op (h_send_crq), but the tm rsp to
+                                        * the abort is sent, the credit is
+                                        * correctly sent with the abort tm rsp.
+                                        * We would need 1 for the abort tm rsp
+                                        * and 1 credit for the aborted scsi op.
+                                        * Thus we need to increment here.
+                                        * Also we want to increment the credit
+                                        * here because we want to make sure
+                                        * cmd is actually released first,
+                                        * otherwise the client will think it
+                                        * can send a new cmd, and we could
+                                        * find ourselves short of cmd elements.
+                                        */
+                                       vscsi->credit += 1;
                                } else {
                                        iue = cmd->iue;
 
@@ -2962,10 +2984,7 @@ static long srp_build_response(struct scsi_info *vscsi,
 
        rsp->opcode = SRP_RSP;
 
-       if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
-               rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
-       else
-               rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+       rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
        rsp->tag = cmd->rsp.tag;
        rsp->flags = 0;
 
index b44c3136eb5181311f12f982fa1ab77b5e95a5f5..520325867e2b4c05528bd89a7eeaccea2f5c6f94 100644 (file)
@@ -1422,7 +1422,7 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
        fp = fc_frame_alloc(lport, sizeof(*rtv));
        if (!fp) {
                rjt_data.reason = ELS_RJT_UNAB;
-               rjt_data.reason = ELS_EXPL_INSUF_RES;
+               rjt_data.explan = ELS_EXPL_INSUF_RES;
                fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
                goto drop;
        }
index 6d7840b096e6f0899823e99d98e12153683e4f07..f2c0ba6ced78bad65694cde021611d9e0e9f9f25 100644 (file)
@@ -141,6 +141,13 @@ struct lpfc_dmabuf {
        uint32_t   buffer_tag;  /* used for tagged queue ring */
 };
 
+struct lpfc_nvmet_ctxbuf {
+       struct list_head list;
+       struct lpfc_nvmet_rcv_ctx *context;
+       struct lpfc_iocbq *iocbq;
+       struct lpfc_sglq *sglq;
+};
+
 struct lpfc_dma_pool {
        struct lpfc_dmabuf   *elements;
        uint32_t    max_count;
@@ -163,9 +170,7 @@ struct rqb_dmabuf {
        struct lpfc_dmabuf dbuf;
        uint16_t total_size;
        uint16_t bytes_recv;
-       void *context;
-       struct lpfc_iocbq *iocbq;
-       struct lpfc_sglq *sglq;
+       uint16_t idx;
        struct lpfc_queue *hrq;   /* ptr to associated Header RQ */
        struct lpfc_queue *drq;   /* ptr to associated Data RQ */
 };
@@ -670,6 +675,8 @@ struct lpfc_hba {
                                        /* INIT_LINK mailbox command */
 #define LS_NPIV_FAB_SUPPORTED 0x2      /* Fabric supports NPIV */
 #define LS_IGNORE_ERATT       0x4      /* intr handler should ignore ERATT */
+#define LS_MDS_LINK_DOWN      0x8      /* MDS Diagnostics Link Down */
+#define LS_MDS_LOOPBACK      0x16      /* MDS Diagnostics Link Up (Loopback) */
 
        uint32_t hba_flag;      /* hba generic flags */
 #define HBA_ERATT_HANDLED      0x1 /* This flag is set when eratt handled */
@@ -777,7 +784,6 @@ struct lpfc_hba {
        uint32_t cfg_nvme_oas;
        uint32_t cfg_nvme_io_channel;
        uint32_t cfg_nvmet_mrq;
-       uint32_t cfg_nvmet_mrq_post;
        uint32_t cfg_enable_nvmet;
        uint32_t cfg_nvme_enable_fb;
        uint32_t cfg_nvmet_fb_size;
@@ -943,6 +949,7 @@ struct lpfc_hba {
        struct pci_pool *lpfc_mbuf_pool;
        struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
        struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
+       struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
        struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
        struct pci_pool *txrdy_payload_pool;
        struct lpfc_dma_pool lpfc_mbuf_safety_pool;
@@ -1228,7 +1235,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
 static inline struct lpfc_sli_ring *
 lpfc_phba_elsring(struct lpfc_hba *phba)
 {
-       if (phba->sli_rev == LPFC_SLI_REV4)
-               return phba->sli4_hba.els_wq->pring;
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               if (phba->sli4_hba.els_wq)
+                       return phba->sli4_hba.els_wq->pring;
+               else
+                       return NULL;
+       }
        return &phba->sli.sli3_ring[LPFC_ELS_RING];
 }
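
lpfc_phba_elsring() can now return NULL on SLI4 before the ELS WQ exists (PCI error recovery is the motivating case), so every caller has to test the ring pointer before dereferencing it, exactly as the els-flush and worker-thread hunks later in this series do. A caller sketch:

    /* Sketch: callers must tolerate a NULL ELS ring. */
    pring = lpfc_phba_elsring(phba);
    if (unlikely(!pring)) {
            spin_unlock_irq(&phba->hbalock);   /* drop lock taken above */
            return;
    }
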
index 4830370bfab14247f567de976787aecc9d6b6d28..bb2d9e238225a43315fa846ddcab4052b4de1caa 100644 (file)
@@ -60,9 +60,9 @@
 #define LPFC_MIN_DEVLOSS_TMO   1
 #define LPFC_MAX_DEVLOSS_TMO   255
 
-#define LPFC_DEF_MRQ_POST      256
-#define LPFC_MIN_MRQ_POST      32
-#define LPFC_MAX_MRQ_POST      512
+#define LPFC_DEF_MRQ_POST      512
+#define LPFC_MIN_MRQ_POST      512
+#define LPFC_MAX_MRQ_POST      2048
 
 /*
  * Write key size should be multiple of 4. If write key is changed
@@ -205,8 +205,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                atomic_read(&tgtp->xmt_ls_rsp_error));
 
                len += snprintf(buf+len, PAGE_SIZE-len,
-                               "FCP: Rcv %08x Drop %08x\n",
+                               "FCP: Rcv %08x Release %08x Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
                if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
@@ -218,15 +219,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                }
 
                len += snprintf(buf+len, PAGE_SIZE-len,
-                               "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n",
+                               "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
+                               "drop %08x\n",
                                atomic_read(&tgtp->xmt_fcp_read),
                                atomic_read(&tgtp->xmt_fcp_read_rsp),
                                atomic_read(&tgtp->xmt_fcp_write),
-                               atomic_read(&tgtp->xmt_fcp_rsp));
-
-               len += snprintf(buf+len, PAGE_SIZE-len,
-                               "FCP Rsp: abort %08x drop %08x\n",
-                               atomic_read(&tgtp->xmt_fcp_abort),
+                               atomic_read(&tgtp->xmt_fcp_rsp),
                                atomic_read(&tgtp->xmt_fcp_drop));
 
                len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,10 +234,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                atomic_read(&tgtp->xmt_fcp_rsp_drop));
 
                len += snprintf(buf+len, PAGE_SIZE-len,
-                               "ABORT: Xmt %08x Err %08x Cmpl %08x",
+                               "ABORT: Xmt %08x Cmpl %08x\n",
+                               atomic_read(&tgtp->xmt_fcp_abort),
+                               atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+
+               len += snprintf(buf + len, PAGE_SIZE - len,
+                               "ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
+                               atomic_read(&tgtp->xmt_abort_sol),
+                               atomic_read(&tgtp->xmt_abort_unsol),
                                atomic_read(&tgtp->xmt_abort_rsp),
-                               atomic_read(&tgtp->xmt_abort_rsp_error),
-                               atomic_read(&tgtp->xmt_abort_cmpl));
+                               atomic_read(&tgtp->xmt_abort_rsp_error));
+
+               len += snprintf(buf + len, PAGE_SIZE - len,
+                               "IO_CTX: %08x outstanding %08x total %x",
+                               phba->sli4_hba.nvmet_ctx_cnt,
+                               phba->sli4_hba.nvmet_io_wait_cnt,
+                               phba->sli4_hba.nvmet_io_wait_total);
 
                len +=  snprintf(buf+len, PAGE_SIZE-len, "\n");
                return len;
@@ -3311,14 +3321,6 @@ LPFC_ATTR_R(nvmet_mrq,
            1, 1, 16,
            "Specify number of RQ pairs for processing NVMET cmds");
 
-/*
- * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
- *
- */
-LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
-           LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
-           "Specify number of buffers to post on every MRQ");
-
 /*
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values:  1 - register just FCP
@@ -5154,7 +5156,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_suppress_rsp,
        &dev_attr_lpfc_nvme_io_channel,
        &dev_attr_lpfc_nvmet_mrq,
-       &dev_attr_lpfc_nvmet_mrq_post,
        &dev_attr_lpfc_nvme_enable_fb,
        &dev_attr_lpfc_nvmet_fb_size,
        &dev_attr_lpfc_enable_bg,
@@ -6194,7 +6195,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 
        lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
        lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
-       lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
 
        /* Initialize first burst. Target vs Initiator are different. */
        lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
@@ -6291,7 +6291,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
                /* Not NVME Target mode.  Turn off Target parameters. */
                phba->nvmet_support = 0;
                phba->cfg_nvmet_mrq = 0;
-               phba->cfg_nvmet_mrq_post = 0;
                phba->cfg_nvmet_fb_size = 0;
        }
 
index 1c55408ac718a94f9aa622210a0ebf96a9896137..8912767e7bc88cc407ea3fb372f242e2cbccd0de 100644 (file)
@@ -75,6 +75,10 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
+void lpfc_free_iocb_list(struct lpfc_hba *phba);
+int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+                       struct lpfc_queue *drq, int count, int idx);
 
 void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -246,16 +250,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
 void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
 struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
 void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
-void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
-                       struct lpfc_dmabuf *mp);
+void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
+                           struct lpfc_nvmet_ctxbuf *ctxp);
 int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
                               struct fc_frame_header *fc_hdr);
 void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
                        uint16_t);
 int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                     struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
-int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
-                       struct lpfc_queue *dq, int count);
 int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
 void lpfc_unregister_fcf(struct lpfc_hba *);
 void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
@@ -271,6 +273,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
 void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba);
 int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
 void lpfc_mem_free(struct lpfc_hba *);
 void lpfc_mem_free_all(struct lpfc_hba *);
index c7962dae4dab8c7130dcb46fa1b7d45dac0509eb..f2cd19c6c2df9fd77516d18fddf2de04cf531437 100644 (file)
@@ -2092,6 +2092,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
 
        ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
        ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
+       ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
        ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
        size = FOURBYTES + 32;
        ad->AttrLen = cpu_to_be16(size);
index fce549a91911c197d8e616bce9e0c13cb17de535..4bcb92c844ca5f5061c8a4a5cdfb3d7835594162 100644 (file)
@@ -797,11 +797,6 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&tgtp->xmt_fcp_write),
                                atomic_read(&tgtp->xmt_fcp_rsp));
 
-               len += snprintf(buf + len, size - len,
-                               "FCP Rsp: abort %08x drop %08x\n",
-                               atomic_read(&tgtp->xmt_fcp_abort),
-                               atomic_read(&tgtp->xmt_fcp_drop));
-
                len += snprintf(buf + len, size - len,
                                "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
                                atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
@@ -809,10 +804,16 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&tgtp->xmt_fcp_rsp_drop));
 
                len += snprintf(buf + len, size - len,
-                               "ABORT: Xmt %08x Err %08x Cmpl %08x",
+                               "ABORT: Xmt %08x Cmpl %08x\n",
+                               atomic_read(&tgtp->xmt_fcp_abort),
+                               atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+
+               len += snprintf(buf + len, size - len,
+                               "ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
+                               atomic_read(&tgtp->xmt_abort_sol),
+                               atomic_read(&tgtp->xmt_abort_unsol),
                                atomic_read(&tgtp->xmt_abort_rsp),
-                               atomic_read(&tgtp->xmt_abort_rsp_error),
-                               atomic_read(&tgtp->xmt_abort_cmpl));
+                               atomic_read(&tgtp->xmt_abort_rsp_error));
 
                len +=  snprintf(buf + len, size - len, "\n");
 
@@ -841,6 +842,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                        }
                        spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                }
+
+               len += snprintf(buf + len, size - len,
+                               "IO_CTX: %08x  outstanding %08x total %08x\n",
+                               phba->sli4_hba.nvmet_ctx_cnt,
+                               phba->sli4_hba.nvmet_io_wait_cnt,
+                               phba->sli4_hba.nvmet_io_wait_total);
        } else {
                if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
                        return len;
@@ -1959,6 +1966,7 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
                atomic_set(&tgtp->rcv_ls_req_out, 0);
                atomic_set(&tgtp->rcv_ls_req_drop, 0);
                atomic_set(&tgtp->xmt_ls_abort, 0);
+               atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
                atomic_set(&tgtp->xmt_ls_rsp, 0);
                atomic_set(&tgtp->xmt_ls_drop, 0);
                atomic_set(&tgtp->xmt_ls_rsp_error, 0);
@@ -1967,19 +1975,22 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
                atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
-               atomic_set(&tgtp->xmt_fcp_abort, 0);
                atomic_set(&tgtp->xmt_fcp_drop, 0);
                atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_read, 0);
                atomic_set(&tgtp->xmt_fcp_write, 0);
                atomic_set(&tgtp->xmt_fcp_rsp, 0);
+               atomic_set(&tgtp->xmt_fcp_release, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
 
+               atomic_set(&tgtp->xmt_fcp_abort, 0);
+               atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
+               atomic_set(&tgtp->xmt_abort_sol, 0);
+               atomic_set(&tgtp->xmt_abort_unsol, 0);
                atomic_set(&tgtp->xmt_abort_rsp, 0);
                atomic_set(&tgtp->xmt_abort_rsp_error, 0);
-               atomic_set(&tgtp->xmt_abort_cmpl, 0);
        }
        return nbytes;
 }
@@ -3070,11 +3081,11 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
                        qp->assoc_qid, qp->q_cnt_1,
                        (unsigned long long)qp->q_cnt_4);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                       "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
                        qp->queue_id, qp->entry_count,
                        qp->entry_size, qp->host_index,
-                       qp->hba_index);
+                       qp->hba_index, qp->entry_repost);
        len +=  snprintf(pbuffer + len,
                        LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
        return len;
@@ -3121,11 +3132,11 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
                        qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
                        qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                       "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
                        qp->queue_id, qp->entry_count,
                        qp->entry_size, qp->host_index,
-                       qp->hba_index);
+                       qp->hba_index, qp->entry_repost);
 
        len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 
@@ -3143,20 +3154,20 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
                        "\t\t%s RQ info: ", rqtype);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
-                       "trunc:x%x rcv:x%llx]\n",
+                       "posted:x%x rcv:x%llx]\n",
                        qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
                        qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+                       "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
                        qp->queue_id, qp->entry_count, qp->entry_size,
-                       qp->host_index, qp->hba_index);
+                       qp->host_index, qp->hba_index, qp->entry_repost);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+                       "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
                        datqp->queue_id, datqp->entry_count,
                        datqp->entry_size, datqp->host_index,
-                       datqp->hba_index);
+                       datqp->hba_index, datqp->entry_repost);
        return len;
 }
 
@@ -3242,10 +3253,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
                        eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
                        (unsigned long long)qp->q_cnt_4);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                       "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
                        qp->queue_id, qp->entry_count, qp->entry_size,
-                       qp->host_index, qp->hba_index);
+                       qp->host_index, qp->hba_index, qp->entry_repost);
        len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 
        return len;
@@ -5855,8 +5866,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
                        atomic_dec(&lpfc_debugfs_hba_count);
                }
 
-               debugfs_remove(lpfc_debugfs_root); /* lpfc */
-               lpfc_debugfs_root = NULL;
+               if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+                       debugfs_remove(lpfc_debugfs_root); /* lpfc */
+                       lpfc_debugfs_root = NULL;
+               }
        }
 #endif
        return;
index 9d5a379f4b15734a484c643fc9cc81b0ba8b33e9..094c97b9e5f741faba5e0ef904f97faa3cc7a734 100644 (file)
@@ -90,6 +90,7 @@ struct lpfc_nodelist {
 #define NLP_FCP_INITIATOR  0x10                        /* entry is an FCP Initiator */
 #define NLP_NVME_TARGET    0x20                        /* entry is a NVME Target */
 #define NLP_NVME_INITIATOR 0x40                        /* entry is a NVME Initiator */
+#define NLP_NVME_DISCOVERY 0x80                 /* entry has NVME disc srvc */
 
        uint16_t        nlp_fc4_type;           /* FC types node supports. */
                                                /* Assigned from GID_FF, only
index 67827e397431abe8b55955d9cc6497cbf680c054..8e532b39ae93af5c35a1199084f606b627e37ff0 100644 (file)
@@ -1047,6 +1047,13 @@ stop_rr_fcf_flogi:
                                 irsp->ulpStatus, irsp->un.ulpWord[4],
                                 irsp->ulpTimeout);
 
+
+               /* If this is not a loop open failure, bail out */
+               if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+                     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+                                       IOERR_LOOP_OPEN_FAILURE)))
+                       goto flogifail;
+
                /* FLOGI failed, so there is no fabric */
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -2077,16 +2084,19 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
        if (irsp->ulpStatus) {
                /* Check for retry */
+               ndlp->fc4_prli_sent--;
                if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
                        /* ELS command is being retried */
-                       ndlp->fc4_prli_sent--;
                        goto out;
                }
+
                /* PRLI failed */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-                                "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
+                                "2754 PRLI failure DID:%06X Status:x%x/x%x, "
+                                "data: x%x\n",
                                 ndlp->nlp_DID, irsp->ulpStatus,
-                                irsp->un.ulpWord[4]);
+                                irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
+
                /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
                if (lpfc_error_lost_link(irsp))
                        goto out;
@@ -7441,6 +7451,13 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
         */
        spin_lock_irq(&phba->hbalock);
        pring = lpfc_phba_elsring(phba);
+
+       /* Bail out if we have no ELS wq, as in the PCI error recovery case. */
+       if (unlikely(!pring)) {
+               spin_unlock_irq(&phba->hbalock);
+               return;
+       }
+
        if (phba->sli_rev == LPFC_SLI_REV4)
                spin_lock(&pring->ring_lock);
 
@@ -8667,7 +8684,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                lpfc_do_scr_ns_plogi(phba, vport);
        goto out;
 fdisc_failed:
-       if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)
+       if (vport->fc_vport &&
+           (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        /* Cancel discovery timer */
        lpfc_can_disctmo(vport);
index 0482c558033104d3a44f75290750e1acdc3ee0d9..3ffcd9215ca892eb7ef3e5972df50a427ef17369 100644 (file)
@@ -693,15 +693,16 @@ lpfc_work_done(struct lpfc_hba *phba)
        pring = lpfc_phba_elsring(phba);
        status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
        status >>= (4*LPFC_ELS_RING);
-       if ((status & HA_RXMASK) ||
-           (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
-           (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+       if (pring && (status & HA_RXMASK ||
+                     pring->flag & LPFC_DEFERRED_RING_EVENT ||
+                     phba->hba_flag & HBA_SP_QUEUE_EVT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
                        /* Set the lpfc data pending flag */
                        set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
-                       if (phba->link_state >= LPFC_LINK_UP) {
+                       if (phba->link_state >= LPFC_LINK_UP ||
+                           phba->link_flag & LS_MDS_LOOPBACK) {
                                pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                                lpfc_sli_handle_slow_ring_event(phba, pring,
                                                                (status &
index 1d12f2be36bcccd336f8892aa0a20d3109d7150c..e0a5fce416aeea7604ab9a46464d8514e82fb0cb 100644 (file)
@@ -1356,6 +1356,7 @@ struct lpfc_mbx_wq_destroy {
 
 #define LPFC_HDR_BUF_SIZE 128
 #define LPFC_DATA_BUF_SIZE 2048
+#define LPFC_NVMET_DATA_BUF_SIZE 128
 struct rq_context {
        uint32_t word0;
 #define lpfc_rq_context_rqe_count_SHIFT        16      /* Version 0 Only */
@@ -4420,6 +4421,19 @@ struct fcp_treceive64_wqe {
 };
 #define TXRDY_PAYLOAD_LEN      12
 
+#define CMD_SEND_FRAME 0xE1
+
+struct send_frame_wqe {
+       struct ulp_bde64 bde;          /* words 0-2 */
+       uint32_t frame_len;            /* word 3 */
+       uint32_t fc_hdr_wd0;           /* word 4 */
+       uint32_t fc_hdr_wd1;           /* word 5 */
+       struct wqe_common wqe_com;     /* words 6-11 */
+       uint32_t fc_hdr_wd2;           /* word 12 */
+       uint32_t fc_hdr_wd3;           /* word 13 */
+       uint32_t fc_hdr_wd4;           /* word 14 */
+       uint32_t fc_hdr_wd5;           /* word 15 */
+};
 
 union lpfc_wqe {
        uint32_t words[16];
@@ -4438,7 +4452,7 @@ union lpfc_wqe {
        struct fcp_trsp64_wqe fcp_trsp;
        struct fcp_tsend64_wqe fcp_tsend;
        struct fcp_treceive64_wqe fcp_treceive;
-
+       struct send_frame_wqe send_frame;
 };
 
 union lpfc_wqe128 {
index 4b1eb98c228df823a986f5568f8b948a7b2ef9bf..9add9473cae52a1f2bf5d1b78a8854a57c9f6192 100644 (file)
@@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 
                list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
                        ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
-                       lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+                       lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
                }
        }
 
@@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 {
        struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
        uint16_t i, lxri, xri_cnt, els_xri_cnt;
-       uint16_t nvmet_xri_cnt, tot_cnt;
+       uint16_t nvmet_xri_cnt;
        LIST_HEAD(nvmet_sgl_list);
        int rc;
 
@@ -3389,15 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
         * update on pci function's nvmet xri-sgl list
         */
        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-       nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
-       tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-       if (nvmet_xri_cnt > tot_cnt) {
-               phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
-               nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
-               lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                               "6301 NVMET post-sgl count changed to %d\n",
-                               phba->cfg_nvmet_mrq_post);
-       }
+
+       /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
+       nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
 
        if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
                /* els xri-sgl expanded */
@@ -4546,6 +4540,19 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
        pmb->vport = phba->pport;
 
        if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
+               phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
+
+               switch (phba->sli4_hba.link_state.status) {
+               case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
+                       phba->link_flag |= LS_MDS_LINK_DOWN;
+                       break;
+               case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
+                       phba->link_flag |= LS_MDS_LOOPBACK;
+                       break;
+               default:
+                       break;
+               }
+
                /* Parse and translate status field */
                mb = &pmb->u.mb;
                mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
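
The two MDS states are mutually exclusive, so the hunk clears both bits before setting the one that matches the async event. The same clear-then-set pattern in isolation (flag values are illustrative, not lpfc's):

    #include <stdint.h>

    #define LS_MDS_LINK_DOWN 0x1    /* illustrative values */
    #define LS_MDS_LOOPBACK  0x2

    enum mds_status { MDS_NONE, MDS_LINK_DOWN, MDS_LOOPBACK };

    static void update_mds_flags(uint32_t *link_flag, enum mds_status status)
    {
            /* Clear both first: at most one state may be active. */
            *link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

            switch (status) {
            case MDS_LINK_DOWN:
                    *link_flag |= LS_MDS_LINK_DOWN;
                    break;
            case MDS_LOOPBACK:
                    *link_flag |= LS_MDS_LOOPBACK;
                    break;
            default:
                    break;
            }
    }
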
@@ -5830,6 +5837,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+               INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+               INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
+
                /* Fast-path XRI aborted CQ Event work queue list */
                INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
        }
@@ -5837,6 +5847,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        /* This abort list used by worker thread */
        spin_lock_init(&phba->sli4_hba.sgl_list_lock);
        spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+       spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
 
        /*
         * Initialize driver internal slow-path work queues
@@ -5951,16 +5962,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
                        if (wwn == lpfc_enable_nvmet[i]) {
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+                               if (lpfc_nvmet_mem_alloc(phba))
+                                       break;
+
+                               phba->nvmet_support = 1; /* a match */
+
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "6017 NVME Target %016llx\n",
                                                wwn);
-                               phba->nvmet_support = 1; /* a match */
 #else
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "6021 Can't enable NVME Target."
                                                " NVME_TARGET_FC infrastructure"
                                                " is not in kernel\n");
 #endif
+                               break;
                        }
                }
        }
@@ -6269,7 +6285,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
  *
  * This routine is invoked to free the driver's IOCB list and memory.
  **/
-static void
+void
 lpfc_free_iocb_list(struct lpfc_hba *phba)
 {
        struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
@@ -6297,7 +6313,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
  *     0 - successful
  *     other values - error
  **/
-static int
+int
 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
 {
        struct lpfc_iocbq *iocbq_entry = NULL;
@@ -6525,7 +6541,6 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
        uint16_t rpi_limit, curr_rpi_range;
        struct lpfc_dmabuf *dmabuf;
        struct lpfc_rpi_hdr *rpi_hdr;
-       uint32_t rpi_count;
 
        /*
         * If the SLI4 port supports extents, posting the rpi header isn't
@@ -6538,8 +6553,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
                return NULL;
 
        /* The limit on the logical index is just the max_rpi count. */
-       rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
-       phba->sli4_hba.max_cfg_param.max_rpi - 1;
+       rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
 
        spin_lock_irq(&phba->hbalock);
        /*
@@ -6550,18 +6564,10 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
        curr_rpi_range = phba->sli4_hba.next_rpi;
        spin_unlock_irq(&phba->hbalock);
 
-       /*
-        * The port has a limited number of rpis. The increment here
-        * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
-        * and to allow the full max_rpi range per port.
-        */
-       if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
-               rpi_count = rpi_limit - curr_rpi_range;
-       else
-               rpi_count = LPFC_RPI_HDR_COUNT;
-
-       if (!rpi_count)
+       /* Reached full RPI range */
+       if (curr_rpi_range == rpi_limit)
                return NULL;
+
        /*
         * First allocate the protocol header region for the port.  The
         * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -6595,13 +6601,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
 
        /* The rpi_hdr stores the logical index only. */
        rpi_hdr->start_rpi = curr_rpi_range;
+       rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
        list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
 
-       /*
-        * The next_rpi stores the next logical module-64 rpi value used
-        * to post physical rpis in subsequent rpi postings.
-        */
-       phba->sli4_hba.next_rpi += rpi_count;
        spin_unlock_irq(&phba->hbalock);
        return rpi_hdr;
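
After this change the RPI header allocator no longer computes a partial count: the logical index advances in whole LPFC_RPI_HDR_COUNT steps and allocation simply stops at max_rpi. A toy model (the 64-RPIs-per-header page size is an assumption based on lpfc's LPFC_RPI_HDR_COUNT):

    #include <stdbool.h>
    #include <stdint.h>

    #define RPI_HDR_COUNT 64        /* stand-in for LPFC_RPI_HDR_COUNT */

    /* Mirrors the new "reached full RPI range" check above. */
    static bool rpi_range_available(uint16_t next_rpi, uint16_t max_rpi)
    {
            return next_rpi != max_rpi;
    }

    /* The logical index now always advances by a whole header's worth. */
    static uint16_t rpi_advance(uint16_t next_rpi)
    {
            return next_rpi + RPI_HDR_COUNT;
    }
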
 
@@ -8172,7 +8174,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                        /* Create NVMET Receive Queue for header */
                        qdesc = lpfc_sli4_queue_alloc(phba,
                                                      phba->sli4_hba.rq_esize,
-                                                     phba->sli4_hba.rq_ecount);
+                                                     LPFC_NVMET_RQE_DEF_COUNT);
                        if (!qdesc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "3146 Failed allocate "
@@ -8194,7 +8196,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                        /* Create NVMET Receive Queue for data */
                        qdesc = lpfc_sli4_queue_alloc(phba,
                                                      phba->sli4_hba.rq_esize,
-                                                     phba->sli4_hba.rq_ecount);
+                                                     LPFC_NVMET_RQE_DEF_COUNT);
                        if (!qdesc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "3156 Failed allocate "
@@ -8325,46 +8327,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
 }
 
-int
-lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
-                   struct lpfc_queue *drq, int count)
-{
-       int rc, i;
-       struct lpfc_rqe hrqe;
-       struct lpfc_rqe drqe;
-       struct lpfc_rqb *rqbp;
-       struct rqb_dmabuf *rqb_buffer;
-       LIST_HEAD(rqb_buf_list);
-
-       rqbp = hrq->rqbp;
-       for (i = 0; i < count; i++) {
-               rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
-               if (!rqb_buffer)
-                       break;
-               rqb_buffer->hrq = hrq;
-               rqb_buffer->drq = drq;
-               list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
-       }
-       while (!list_empty(&rqb_buf_list)) {
-               list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
-                                hbuf.list);
-
-               hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
-               hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
-               drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
-               drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
-               rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
-               if (rc < 0) {
-                       (rqbp->rqb_free_buffer)(phba, rqb_buffer);
-               } else {
-                       list_add_tail(&rqb_buffer->hbuf.list,
-                                     &rqbp->rqb_buffer_list);
-                       rqbp->buffer_count++;
-               }
-       }
-       return 1;
-}
-
 int
 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
 {
@@ -8784,9 +8746,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                goto out_destroy;
        }
 
-       lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
-       lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
-
        rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
                            phba->sli4_hba.els_cq, LPFC_USOL);
        if (rc) {
@@ -11110,7 +11069,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        struct lpfc_hba   *phba;
        struct lpfc_vport *vport = NULL;
        struct Scsi_Host  *shost = NULL;
-       int error, cnt;
+       int error;
        uint32_t cfg_mode, intr_mode;
 
        /* Allocate memory for HBA structure */
@@ -11144,22 +11103,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
                goto out_unset_pci_mem_s4;
        }
 
-       cnt = phba->cfg_iocb_cnt * 1024;
-       if (phba->nvmet_support)
-               cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;
-
-       /* Initialize and populate the iocb list per host */
-       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "2821 initialize iocb list %d total %d\n",
-                       phba->cfg_iocb_cnt, cnt);
-       error = lpfc_init_iocb_list(phba, cnt);
-
-       if (error) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "1413 Failed to initialize iocb list.\n");
-               goto out_unset_driver_resource_s4;
-       }
-
        INIT_LIST_HEAD(&phba->active_rrq_list);
        INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
 
@@ -11168,7 +11111,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1414 Failed to set up driver resource.\n");
-               goto out_free_iocb_list;
+               goto out_unset_driver_resource_s4;
        }
 
        /* Get the default values for Model Name and Description */
@@ -11268,8 +11211,6 @@ out_destroy_shost:
        lpfc_destroy_shost(phba);
 out_unset_driver_resource:
        lpfc_unset_driver_resource_phase2(phba);
-out_free_iocb_list:
-       lpfc_free_iocb_list(phba);
 out_unset_driver_resource_s4:
        lpfc_sli4_driver_resource_unset(phba);
 out_unset_pci_mem_s4:
index 5986c7957199df6ef97343a3c0402931cbdeb7ad..fcc05a1517c21d5134282e6cc9337ade5ee1a5c9 100644 (file)
@@ -214,6 +214,21 @@ fail_free_drb_pool:
        return -ENOMEM;
 }
 
+int
+lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
+{
+       phba->lpfc_nvmet_drb_pool =
+               pci_pool_create("lpfc_nvmet_drb_pool",
+                               phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
+                               SGL_ALIGN_SZ, 0);
+       if (!phba->lpfc_nvmet_drb_pool) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "6024 Can't enable NVME Target - no memory\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
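
pci_pool_create() is the legacy wrapper around dma_pool_create(), so the new pool is a plain fixed-size coherent DMA pool. A minimal kernel-side sketch of an equivalent allocation, assuming the 128-byte LPFC_NVMET_DATA_BUF_SIZE and SGL alignment used above:

    #include <linux/dmapool.h>

    /* One pool serving many fixed-size, SGL-aligned DMA buffers; the
     * pci_pool_create() call above expands to exactly this. */
    static struct dma_pool *nvmet_drb_pool_create(struct device *dev,
                                                  size_t buf_size, size_t align)
    {
            return dma_pool_create("lpfc_nvmet_drb_pool", dev, buf_size,
                                   align, 0);
    }
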
 /**
  * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
  * @phba: HBA to free memory for
@@ -232,6 +247,9 @@ lpfc_mem_free(struct lpfc_hba *phba)
 
        /* Free HBQ pools */
        lpfc_sli_hbqbuf_free_all(phba);
+       if (phba->lpfc_nvmet_drb_pool)
+               pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
+       phba->lpfc_nvmet_drb_pool = NULL;
        if (phba->lpfc_drb_pool)
                pci_pool_destroy(phba->lpfc_drb_pool);
        phba->lpfc_drb_pool = NULL;
@@ -611,8 +629,6 @@ struct rqb_dmabuf *
 lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 {
        struct rqb_dmabuf *dma_buf;
-       struct lpfc_iocbq *nvmewqe;
-       union lpfc_wqe128 *wqe;
 
        dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
        if (!dma_buf)
@@ -624,69 +640,15 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
                kfree(dma_buf);
                return NULL;
        }
-       dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
-                                           &dma_buf->dbuf.phys);
+       dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
+                                           GFP_KERNEL, &dma_buf->dbuf.phys);
        if (!dma_buf->dbuf.virt) {
                pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
                              dma_buf->hbuf.phys);
                kfree(dma_buf);
                return NULL;
        }
-       dma_buf->total_size = LPFC_DATA_BUF_SIZE;
-
-       dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
-                                  GFP_KERNEL);
-       if (!dma_buf->context) {
-               pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
-                             dma_buf->dbuf.phys);
-               pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-                             dma_buf->hbuf.phys);
-               kfree(dma_buf);
-               return NULL;
-       }
-
-       dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
-       if (!dma_buf->iocbq) {
-               kfree(dma_buf->context);
-               pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
-                             dma_buf->dbuf.phys);
-               pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-                             dma_buf->hbuf.phys);
-               kfree(dma_buf);
-               lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-                               "2621 Ran out of nvmet iocb/WQEs\n");
-               return NULL;
-       }
-       dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
-       nvmewqe = dma_buf->iocbq;
-       wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
-       /* Initialize WQE */
-       memset(wqe, 0, sizeof(union lpfc_wqe));
-       /* Word 7 */
-       bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
-       bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-       bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
-       /* Word 10 */
-       bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
-       bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
-       bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
-
-       dma_buf->iocbq->context1 = NULL;
-       spin_lock(&phba->sli4_hba.sgl_list_lock);
-       dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
-       spin_unlock(&phba->sli4_hba.sgl_list_lock);
-       if (!dma_buf->sglq) {
-               lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
-               kfree(dma_buf->context);
-               pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
-                             dma_buf->dbuf.phys);
-               pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-                             dma_buf->hbuf.phys);
-               kfree(dma_buf);
-               lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-                               "6132 Ran out of nvmet XRIs\n");
-               return NULL;
-       }
+       dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
        return dma_buf;
 }
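
The deleted block is the heart of this patch's design change: the receive buffer stops carrying an iocbq, an sglq and a receive context, and becomes a cheap DMA-only object that can be reposted immediately. Rough shape of the split (field types abbreviated; the real structs live in the lpfc headers):

    /* Per-RQE object: nothing but DMA memory, trivially recycled. */
    struct rqb_dmabuf_sketch {
            void *hbuf;     /* 128-byte FC header buffer (hrb pool)           */
            void *dbuf;     /* LPFC_NVMET_DATA_BUF_SIZE buffer (new drb pool) */
    };

    /* Per-XRI object: the expensive state, now pooled once at setup. */
    struct lpfc_nvmet_ctxbuf_sketch {
            void *context;  /* struct lpfc_nvmet_rcv_ctx                      */
            void *iocbq;    /* pre-initialized WQE carrier                    */
            void *sglq;     /* XRI-backed scatter/gather list                 */
    };
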
 
@@ -705,20 +667,9 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-       unsigned long flags;
-
-       __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
-       dmab->sglq->state = SGL_FREED;
-       dmab->sglq->ndlp = NULL;
-
-       spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
-       list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
-       spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
-
-       lpfc_sli_release_iocbq(phba, dmab->iocbq);
-       kfree(dmab->context);
        pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-       pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+       pci_pool_free(phba->lpfc_nvmet_drb_pool,
+                     dmab->dbuf.virt, dmab->dbuf.phys);
        kfree(dmab);
 }
 
@@ -803,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
        rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
        if (rc < 0) {
                (rqbp->rqb_free_buffer)(phba, rqb_entry);
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "6409 Cannot post to RQ %d: %x %x\n",
+                               rqb_entry->hrq->queue_id,
+                               rqb_entry->hrq->host_index,
+                               rqb_entry->hrq->hba_index);
        } else {
                list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
                rqbp->buffer_count++;
index 8777c2d5f50d35ecae18223da67245157811b4be..bff3de053df475365193ea47b153c13795f9c816 100644 (file)
@@ -1944,7 +1944,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
                /* Target driver cannot solicit NVME FB. */
                if (bf_get_be32(prli_tgt, nvpr)) {
+                       /* Complete the nvme target roles.  The transport
+                        * needs to know if the rport is capable of
+                        * discovery in addition to its role.
+                        */
                        ndlp->nlp_type |= NLP_NVME_TARGET;
+                       if (bf_get_be32(prli_disc, nvpr))
+                               ndlp->nlp_type |= NLP_NVME_DISCOVERY;
                        if ((bf_get_be32(prli_fba, nvpr) == 1) &&
                            (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
                            (phba->cfg_nvme_enable_fb) &&
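
The added discovery bit lets the transport distinguish a plain NVMe target from one that also serves the discovery service. The role accumulation in isolation (bit values illustrative, not lpfc's NLP_* flags):

    #define NVME_TARGET_ROLE    0x1 /* illustrative stand-ins */
    #define NVME_DISCOVERY_ROLE 0x2

    static unsigned int nvme_roles_from_prli(int prli_tgt, int prli_disc)
    {
            unsigned int type = 0;

            if (prli_tgt) {
                    type |= NVME_TARGET_ROLE;
                    if (prli_disc)          /* discovery-capable target */
                            type |= NVME_DISCOVERY_ROLE;
            }
            return type;
    }
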
index 0488580eea12eecd0c2767bc6ad5c11a014ac46d..074a6b5e7763510555d9b7f9f7e34e095af1b0f4 100644 (file)
@@ -142,7 +142,7 @@ out:
 }
 
 /**
- * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
+ * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
  * @phba: HBA buffer is associated with
  * @ctx_buf: context buffer holding the buffers and state to recycle
@@ -155,24 +155,113 @@ out:
  * Returns: None
  **/
 void
-lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
-                  struct lpfc_dmabuf *mp)
+lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 {
-       if (ctxp) {
-               if (ctxp->flag)
-                       lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                               "6314 rq_post ctx xri x%x flag x%x\n",
-                               ctxp->oxid, ctxp->flag);
-
-               if (ctxp->txrdy) {
-                       pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
-                                     ctxp->txrdy_phys);
-                       ctxp->txrdy = NULL;
-                       ctxp->txrdy_phys = 0;
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+       struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+       struct lpfc_nvmet_tgtport *tgtp;
+       struct fc_frame_header *fc_hdr;
+       struct rqb_dmabuf *nvmebuf;
+       uint32_t *payload;
+       uint32_t size, oxid, sid, rc;
+       unsigned long iflag;
+
+       if (ctxp->txrdy) {
+               pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+                             ctxp->txrdy_phys);
+               ctxp->txrdy = NULL;
+               ctxp->txrdy_phys = 0;
+       }
+       ctxp->state = LPFC_NVMET_STE_FREE;
+
+       spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+       if (phba->sli4_hba.nvmet_io_wait_cnt) {
+               list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
+                                nvmebuf, struct rqb_dmabuf,
+                                hbuf.list);
+               phba->sli4_hba.nvmet_io_wait_cnt--;
+               spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+                                      iflag);
+
+               fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+               oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+               tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+               payload = (uint32_t *)(nvmebuf->dbuf.virt);
+               size = nvmebuf->bytes_recv;
+               sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+               ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+               memset(ctxp, 0, sizeof(ctxp->ctx));
+               ctxp->wqeq = NULL;
+               ctxp->txrdy = NULL;
+               ctxp->offset = 0;
+               ctxp->phba = phba;
+               ctxp->size = size;
+               ctxp->oxid = oxid;
+               ctxp->sid = sid;
+               ctxp->state = LPFC_NVMET_STE_RCV;
+               ctxp->entry_cnt = 1;
+               ctxp->flag = 0;
+               ctxp->ctxbuf = ctx_buf;
+               spin_lock_init(&ctxp->ctxlock);
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+               if (phba->ktime_on) {
+                       ctxp->ts_cmd_nvme = ktime_get_ns();
+                       ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
+                       ctxp->ts_nvme_data = 0;
+                       ctxp->ts_data_wqput = 0;
+                       ctxp->ts_isr_data = 0;
+                       ctxp->ts_data_nvme = 0;
+                       ctxp->ts_nvme_status = 0;
+                       ctxp->ts_status_wqput = 0;
+                       ctxp->ts_isr_status = 0;
+                       ctxp->ts_status_nvme = 0;
                }
-               ctxp->state = LPFC_NVMET_STE_FREE;
+#endif
+               atomic_inc(&tgtp->rcv_fcp_cmd_in);
+               /*
+                * The calling sequence should be:
+                * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
+                * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+                * When we return from nvmet_fc_rcv_fcp_req, all relevant info
+                * in the NVME command / FC header is stored.
+                * A buffer has already been reposted for this IO, so just free
+                * the nvmebuf.
+                */
+               rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+                                         payload, size);
+
+               /* Process FCP command */
+               if (rc == 0) {
+                       atomic_inc(&tgtp->rcv_fcp_cmd_out);
+                       nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+                       return;
+               }
+
+               atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+                               "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+                               ctxp->oxid, rc,
+                               atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->rcv_fcp_cmd_out),
+                               atomic_read(&tgtp->xmt_fcp_release));
+
+               lpfc_nvmet_defer_release(phba, ctxp);
+               lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+               nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+               return;
        }
-       lpfc_rq_buf_free(phba, mp);
+       spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+
+       spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+       list_add_tail(&ctx_buf->list,
+                     &phba->sli4_hba.lpfc_nvmet_ctx_list);
+       phba->sli4_hba.nvmet_ctx_cnt++;
+       spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+#endif
 }
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
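
The rewritten free path doubles as a dispatcher for commands that arrived while the context pool was empty: a freed context first services a parked IO from lpfc_nvmet_io_wait_list and only returns to lpfc_nvmet_ctx_list if nothing is waiting. A toy model of that replay-on-free scheme, with counters standing in for the locked lists:

    struct nvmet_pools {
            int io_wait_cnt;        /* commands parked for lack of a context */
            int ctx_cnt;            /* free contexts                         */
    };

    static void ctxbuf_post_sketch(struct nvmet_pools *p,
                                   void (*replay_cmd)(void))
    {
            if (p->io_wait_cnt) {
                    p->io_wait_cnt--;
                    replay_cmd();   /* reuse this context immediately */
                    return;
            }
            p->ctx_cnt++;           /* nothing parked: recycle the context */
    }
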
@@ -502,6 +591,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                                "6150 LS Drop IO x%x: Prep\n",
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+               atomic_inc(&nvmep->xmt_ls_abort);
                lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
@@ -545,6 +635,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
        lpfc_nlp_put(nvmewqeq->context1);
 
        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+       atomic_inc(&nvmep->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
 }
@@ -612,9 +703,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);
 
+       ctxp->flag |= LPFC_NVMET_IO_INP;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
        if (rc == WQE_SUCCESS) {
-               ctxp->flag |= LPFC_NVMET_IO_INP;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!phba->ktime_on)
                        return 0;
@@ -692,6 +783,7 @@ static void
 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *rsp)
 {
+       struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
@@ -710,10 +802,12 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
        lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
                         ctxp->state, 0);
 
+       atomic_inc(&lpfc_nvmep->xmt_fcp_release);
+
        if (aborting)
                return;
 
-       lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+       lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 }
 
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -734,17 +828,128 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
 };
 
+void
+lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+{
+       struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
+       unsigned long flags;
+
+       list_for_each_entry_safe(
+               ctx_buf, next_ctx_buf,
+               &phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
+               spin_lock_irqsave(
+                       &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+               list_del_init(&ctx_buf->list);
+               spin_unlock_irqrestore(
+                       &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+               __lpfc_clear_active_sglq(phba,
+                                        ctx_buf->sglq->sli4_lxritag);
+               ctx_buf->sglq->state = SGL_FREED;
+               ctx_buf->sglq->ndlp = NULL;
+
+               spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+               list_add_tail(&ctx_buf->sglq->list,
+                             &phba->sli4_hba.lpfc_nvmet_sgl_list);
+               spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
+                                      flags);
+
+               lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+               kfree(ctx_buf->context);
+       }
+}
+
+int
+lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
+{
+       struct lpfc_nvmet_ctxbuf *ctx_buf;
+       struct lpfc_iocbq *nvmewqe;
+       union lpfc_wqe128 *wqe;
+       int i;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+                       "6403 Allocate NVMET resources for %d XRIs\n",
+                       phba->sli4_hba.nvmet_xri_cnt);
+
+       /* For all nvmet xris, allocate resources needed to process a
+        * received command on a per xri basis.
+        */
+       for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
+               ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
+               if (!ctx_buf) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                                       "6404 Ran out of memory for NVMET\n");
+                       return -ENOMEM;
+               }
+
+               ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
+                                          GFP_KERNEL);
+               if (!ctx_buf->context) {
+                       kfree(ctx_buf);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                                       "6405 Ran out of NVMET "
+                                       "context memory\n");
+                       return -ENOMEM;
+               }
+               ctx_buf->context->ctxbuf = ctx_buf;
+
+               ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
+               if (!ctx_buf->iocbq) {
+                       kfree(ctx_buf->context);
+                       kfree(ctx_buf);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                                       "6406 Ran out of NVMET iocb/WQEs\n");
+                       return -ENOMEM;
+               }
+               ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+               nvmewqe = ctx_buf->iocbq;
+               wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+               /* Initialize WQE */
+               memset(wqe, 0, sizeof(union lpfc_wqe));
+               /* Word 7 */
+               bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
+               bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+               bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
+               /* Word 10 */
+               bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+               bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
+               bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
+
+               ctx_buf->iocbq->context1 = NULL;
+               spin_lock(&phba->sli4_hba.sgl_list_lock);
+               ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
+               spin_unlock(&phba->sli4_hba.sgl_list_lock);
+               if (!ctx_buf->sglq) {
+                       lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+                       kfree(ctx_buf->context);
+                       kfree(ctx_buf);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                                       "6407 Ran out of NVMET XRIs\n");
+                       return -ENOMEM;
+               }
+               spin_lock(&phba->sli4_hba.nvmet_io_lock);
+               list_add_tail(&ctx_buf->list,
+                             &phba->sli4_hba.lpfc_nvmet_ctx_list);
+               spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+       }
+       phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
+       return 0;
+}
+
 int
 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 {
        struct lpfc_vport  *vport = phba->pport;
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmet_fc_port_info pinfo;
-       int error = 0;
+       int error;
 
        if (phba->targetport)
                return 0;
 
+       error = lpfc_nvmet_setup_io_context(phba);
+       if (error)
+               return error;
+
        memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
        pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
        pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
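
Together with lpfc_nvmet_create_targetport(), the pool gets a simple lifecycle: build it before registering the targetport, tear it down if registration fails, and tear it down again at destroy (see the lpfc_nvmet_destroy_targetport hunk below). A condensed sketch with stand-ins for the lpfc/nvmet calls:

    /* Stand-ins for the calls wired up in the hunks. */
    static int  setup_io_context(void)    { return 0; } /* lpfc_nvmet_setup_io_context   */
    static void cleanup_io_context(void)  { }           /* lpfc_nvmet_cleanup_io_context */
    static int  register_targetport(void) { return 0; } /* nvmet_fc_register_targetport  */

    static int create_targetport_sketch(void)
    {
            int error = setup_io_context();     /* pool must exist first */

            if (error)
                    return error;

            error = register_targetport();
            if (error)
                    cleanup_io_context();       /* registration failed: undo */
            return error;
    }
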
@@ -772,13 +977,16 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
                                             &phba->pcidev->dev,
                                             &phba->targetport);
 #else
-       error = -ENOMEM;
+       error = -ENOENT;
 #endif
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6025 Cannot register NVME targetport "
                                "x%x\n", error);
                phba->targetport = NULL;
+
+               lpfc_nvmet_cleanup_io_context(phba);
+
        } else {
                tgtp = (struct lpfc_nvmet_tgtport *)
                        phba->targetport->private;
@@ -795,6 +1003,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
                atomic_set(&tgtp->rcv_ls_req_out, 0);
                atomic_set(&tgtp->rcv_ls_req_drop, 0);
                atomic_set(&tgtp->xmt_ls_abort, 0);
+               atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
                atomic_set(&tgtp->xmt_ls_rsp, 0);
                atomic_set(&tgtp->xmt_ls_drop, 0);
                atomic_set(&tgtp->xmt_ls_rsp_error, 0);
@@ -802,18 +1011,21 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
                atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
-               atomic_set(&tgtp->xmt_fcp_abort, 0);
                atomic_set(&tgtp->xmt_fcp_drop, 0);
                atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_read, 0);
                atomic_set(&tgtp->xmt_fcp_write, 0);
                atomic_set(&tgtp->xmt_fcp_rsp, 0);
+               atomic_set(&tgtp->xmt_fcp_release, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+               atomic_set(&tgtp->xmt_fcp_abort, 0);
+               atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
+               atomic_set(&tgtp->xmt_abort_unsol, 0);
+               atomic_set(&tgtp->xmt_abort_sol, 0);
                atomic_set(&tgtp->xmt_abort_rsp, 0);
                atomic_set(&tgtp->xmt_abort_rsp_error, 0);
-               atomic_set(&tgtp->xmt_abort_cmpl, 0);
        }
        return error;
 }
@@ -864,7 +1076,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
        list_for_each_entry_safe(ctxp, next_ctxp,
                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 list) {
-               if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+               if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
                        continue;
 
                /* Check if we already received a free context call
@@ -885,7 +1097,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
                     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
                        lpfc_set_rrq_active(phba, ndlp,
-                               ctxp->rqb_buffer->sglq->sli4_lxritag,
+                               ctxp->ctxbuf->sglq->sli4_lxritag,
                                rxid, 1);
                        lpfc_sli4_abts_err_handler(phba, ndlp, axri);
                }
@@ -894,8 +1106,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                                "6318 XB aborted %x flg x%x (%x)\n",
                                ctxp->oxid, ctxp->flag, released);
                if (released)
-                       lpfc_nvmet_rq_post(phba, ctxp,
-                                          &ctxp->rqb_buffer->hbuf);
+                       lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+
                if (rrq_empty)
                        lpfc_worker_wake_up(phba);
                return;
@@ -923,7 +1135,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
        list_for_each_entry_safe(ctxp, next_ctxp,
                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 list) {
-               if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+               if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
                        continue;
 
                spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
@@ -975,6 +1187,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
                init_completion(&tgtp->tport_unreg_done);
                nvmet_fc_unregister_targetport(phba->targetport);
                wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+               lpfc_nvmet_cleanup_io_context(phba);
        }
        phba->targetport = NULL;
 #endif
@@ -1010,6 +1223,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                oxid = 0;
                size = 0;
                sid = 0;
+               ctxp = NULL;
                goto dropit;
        }
 
@@ -1104,39 +1318,71 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
+       struct lpfc_nvmet_ctxbuf *ctx_buf;
        uint32_t *payload;
-       uint32_t size, oxid, sid, rc;
+       uint32_t size, oxid, sid, rc, qno;
+       unsigned long iflag;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
 #endif
 
+       ctx_buf = NULL;
        if (!nvmebuf || !phba->targetport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                               "6157 FCP Drop IO\n");
+                               "6157 NVMET FCP Drop IO\n");
                oxid = 0;
                size = 0;
                sid = 0;
+               ctxp = NULL;
                goto dropit;
        }
 
+       spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+       if (phba->sli4_hba.nvmet_ctx_cnt) {
+               list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
+                                ctx_buf, struct lpfc_nvmet_ctxbuf, list);
+               phba->sli4_hba.nvmet_ctx_cnt--;
+       }
+       spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
 
-       tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       payload = (uint32_t *)(nvmebuf->dbuf.virt);
        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
-       size = nvmebuf->bytes_recv;
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
-       sid = sli4_sid_from_fc_hdr(fc_hdr);
+       size = nvmebuf->bytes_recv;
 
-       ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
-       if (ctxp == NULL) {
-               atomic_inc(&tgtp->rcv_fcp_cmd_drop);
-               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                               "6158 FCP Drop IO x%x: Alloc\n",
-                               oxid);
-               lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
-               /* Cannot send ABTS without context */
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
+               id = smp_processor_id();
+               if (id < LPFC_CHECK_CPU_CNT)
+                       phba->cpucheck_rcv_io[id]++;
+       }
+#endif
+
+       lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
+                        oxid, size, smp_processor_id());
+
+       if (!ctx_buf) {
+               /* Queue this NVME IO to process later */
+               spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+               list_add_tail(&nvmebuf->hbuf.list,
+                             &phba->sli4_hba.lpfc_nvmet_io_wait_list);
+               phba->sli4_hba.nvmet_io_wait_cnt++;
+               phba->sli4_hba.nvmet_io_wait_total++;
+               spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+                                      iflag);
+
+               /* Post a brand new DMA buffer to RQ */
+               qno = nvmebuf->idx;
+               lpfc_post_rq_buffer(
+                       phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+                       phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
                return;
        }
+
+       tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+       payload = (uint32_t *)(nvmebuf->dbuf.virt);
+       sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+       ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
        memset(ctxp, 0, sizeof(ctxp->ctx));
        ctxp->wqeq = NULL;
        ctxp->txrdy = NULL;
@@ -1146,9 +1392,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        ctxp->oxid = oxid;
        ctxp->sid = sid;
        ctxp->state = LPFC_NVMET_STE_RCV;
-       ctxp->rqb_buffer = nvmebuf;
        ctxp->entry_cnt = 1;
        ctxp->flag = 0;
+       ctxp->ctxbuf = ctx_buf;
        spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1164,22 +1410,16 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                ctxp->ts_isr_status = 0;
                ctxp->ts_status_nvme = 0;
        }
-
-       if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
-               id = smp_processor_id();
-               if (id < LPFC_CHECK_CPU_CNT)
-                       phba->cpucheck_rcv_io[id]++;
-       }
 #endif
 
-       lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
-                        oxid, size, smp_processor_id());
-
        atomic_inc(&tgtp->rcv_fcp_cmd_in);
        /*
         * The calling sequence should be:
         * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
         * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+        * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
+        * the NVME command / FC header is stored, so we are free to repost
+        * the buffer.
         */
        rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
                                  payload, size);
@@ -1187,26 +1427,32 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        /* Process FCP command */
        if (rc == 0) {
                atomic_inc(&tgtp->rcv_fcp_cmd_out);
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                return;
        }
 
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                       "6159 FCP Drop IO x%x: err x%x\n",
-                       ctxp->oxid, rc);
+                       "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+                       ctxp->oxid, rc,
+                       atomic_read(&tgtp->rcv_fcp_cmd_in),
+                       atomic_read(&tgtp->rcv_fcp_cmd_out),
+                       atomic_read(&tgtp->xmt_fcp_release));
 dropit:
        lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
                         oxid, size, sid);
        if (oxid) {
+               lpfc_nvmet_defer_release(phba, ctxp);
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                return;
        }
 
-       if (nvmebuf) {
-               nvmebuf->iocbq->hba_wqidx = 0;
-               /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
-               lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
-       }
+       if (ctx_buf)
+               lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
+
+       if (nvmebuf)
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
 #endif
 }
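
The net effect on the fast path: an RQ buffer is never held for the lifetime of an IO anymore. It is either parked (no free context) while a fresh buffer is posted in its place, or reposted as soon as the transport has consumed the command. Shape of the new path, with hypothetical stand-ins for the helpers named above:

    #include <stdbool.h>

    static void park_on_wait_list(void)  { } /* lpfc_nvmet_io_wait_list add */
    static void post_new_rq_buffer(void) { } /* lpfc_post_rq_buffer()       */
    static void hand_to_transport(void)  { } /* nvmet_fc_rcv_fcp_req()      */
    static void repost_buffer(void)      { } /* lpfc_rq_buf_free()          */

    static void nvmet_rcv_sketch(bool have_ctx)
    {
            if (!have_ctx) {
                    park_on_wait_list();
                    post_new_rq_buffer();   /* keep the hardware RQ topped up */
                    return;
            }
            hand_to_transport();
            repost_buffer();                /* header/payload already consumed */
    }
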
 
@@ -1258,7 +1504,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
                           uint64_t isr_timestamp)
 {
        if (phba->nvmet_support == 0) {
-               lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
                return;
        }
        lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
@@ -1459,7 +1705,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
        nvmewqe = ctxp->wqeq;
        if (nvmewqe == NULL) {
                /* Allocate buffer for  command wqe */
-               nvmewqe = ctxp->rqb_buffer->iocbq;
+               nvmewqe = ctxp->ctxbuf->iocbq;
                if (nvmewqe == NULL) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6110 lpfc_nvmet_prep_fcp_wqe: No "
@@ -1486,7 +1732,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                return NULL;
        }
 
-       sgl  = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
+       sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
        switch (rsp->op) {
        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
@@ -1811,7 +2057,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       atomic_inc(&tgtp->xmt_abort_cmpl);
+       if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+               atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
 
        ctxp->state = LPFC_NVMET_STE_DONE;
 
@@ -1826,6 +2073,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        }
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+       atomic_inc(&tgtp->xmt_abort_rsp);
 
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                        "6165 ABORT cmpl: xri x%x flg x%x (%d) "
@@ -1834,15 +2082,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        wcqe->word0, wcqe->total_data_placed,
                        result, wcqe->word3);
 
+       cmdwqe->context2 = NULL;
+       cmdwqe->context3 = NULL;
        /*
         * if transport has released ctx, then can reuse it. Otherwise,
         * will be recycled by transport release call.
         */
        if (released)
-               lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+               lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 
-       cmdwqe->context2 = NULL;
-       cmdwqe->context3 = NULL;
+       /* This is the iocbq for the abort, not the command */
        lpfc_sli_release_iocbq(phba, cmdwqe);
 
        /* Since iaab/iaar are NOT set, there is no work left.
@@ -1876,7 +2125,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       atomic_inc(&tgtp->xmt_abort_cmpl);
+       if (ctxp && (ctxp->flag & LPFC_NVMET_ABORT_OP))
+               atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
 
        if (!ctxp) {
                /* if context is clear, related io already complete */
@@ -1906,6 +2156,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        }
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+       atomic_inc(&tgtp->xmt_abort_rsp);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6316 ABTS cmpl xri x%x flg x%x (%x) "
@@ -1913,15 +2164,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        ctxp->oxid, ctxp->flag, released,
                        wcqe->word0, wcqe->total_data_placed,
                        result, wcqe->word3);
+
+       cmdwqe->context2 = NULL;
+       cmdwqe->context3 = NULL;
        /*
         * if transport has released ctx, then can reuse it. Otherwise,
         * will be recycled by transport release call.
         */
        if (released)
-               lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
-
-       cmdwqe->context2 = NULL;
-       cmdwqe->context3 = NULL;
+               lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 
        /* Since iaab/iaar are NOT set, there is no work left.
         * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
@@ -1952,7 +2203,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       atomic_inc(&tgtp->xmt_abort_cmpl);
+       atomic_inc(&tgtp->xmt_ls_abort_cmpl);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
@@ -1983,10 +2234,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
                        sid, xri, ctxp->wqeq->sli4_xritag);
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       if (!ctxp->wqeq) {
-               ctxp->wqeq = ctxp->rqb_buffer->iocbq;
-               ctxp->wqeq->hba_wqidx = 0;
-       }
 
        ndlp = lpfc_findnode_did(phba->pport, sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
@@ -2082,7 +2329,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
-               ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+               ctxp->wqeq = ctxp->ctxbuf->iocbq;
                ctxp->wqeq->hba_wqidx = 0;
        }
 
@@ -2103,6 +2350,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
        /* Issue ABTS for this WQE based on iotag */
        ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
        if (!ctxp->abort_wqeq) {
+               atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                                "6161 ABORT failed: No wqeqs: "
                                "xri: x%x\n", ctxp->oxid);
@@ -2127,6 +2375,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
        /* driver queued commands are in process of being flushed */
        if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
+               atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                "6163 Driver in reset cleanup - flushing "
                                "NVME Req now. hba_flag x%x oxid x%x\n",
@@ -2139,6 +2388,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
        /* Outstanding abort is in progress */
        if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
+               atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                "6164 Outstanding NVME I/O Abort Request "
                                "still pending on oxid x%x\n",
@@ -2189,9 +2439,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
        abts_wqeq->context2 = ctxp;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
-       if (rc == WQE_SUCCESS)
+       if (rc == WQE_SUCCESS) {
+               atomic_inc(&tgtp->xmt_abort_sol);
                return 0;
+       }
 
+       atomic_inc(&tgtp->xmt_abort_rsp_error);
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        lpfc_sli_release_iocbq(phba, abts_wqeq);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2214,7 +2467,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
-               ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+               ctxp->wqeq = ctxp->ctxbuf->iocbq;
                ctxp->wqeq->hba_wqidx = 0;
        }
 
@@ -2230,11 +2483,11 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS) {
-               atomic_inc(&tgtp->xmt_abort_rsp);
                return 0;
        }
 
 aerr:
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        atomic_inc(&tgtp->xmt_abort_rsp_error);
        lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
@@ -2269,6 +2522,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
        }
        abts_wqeq = ctxp->wqeq;
        wqe_abts = &abts_wqeq->wqe;
+
        lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
 
        spin_lock_irqsave(&phba->hbalock, flags);
@@ -2278,7 +2532,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
        rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS) {
-               atomic_inc(&tgtp->xmt_abort_rsp);
+               atomic_inc(&tgtp->xmt_abort_unsol);
                return 0;
        }
 
index 128759fe665058dba133febdaa7140f29469f733..6eb2f5d8d4eda40f931097752c72f175354f14da 100644 (file)
@@ -22,6 +22,7 @@
  ********************************************************************/
 
 #define LPFC_NVMET_DEFAULT_SEGS                (64 + 1)        /* 256K IOs */
+#define LPFC_NVMET_RQE_DEF_COUNT       512
 #define LPFC_NVMET_SUCCESS_LEN 12
 
 /* Used for NVME Target */
@@ -34,6 +35,7 @@ struct lpfc_nvmet_tgtport {
        atomic_t rcv_ls_req_out;
        atomic_t rcv_ls_req_drop;
        atomic_t xmt_ls_abort;
+       atomic_t xmt_ls_abort_cmpl;
 
        /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
        atomic_t xmt_ls_rsp;
@@ -47,9 +49,9 @@ struct lpfc_nvmet_tgtport {
        atomic_t rcv_fcp_cmd_in;
        atomic_t rcv_fcp_cmd_out;
        atomic_t rcv_fcp_cmd_drop;
+       atomic_t xmt_fcp_release;
 
        /* Stats counters - lpfc_nvmet_xmt_fcp_op */
-       atomic_t xmt_fcp_abort;
        atomic_t xmt_fcp_drop;
        atomic_t xmt_fcp_read_rsp;
        atomic_t xmt_fcp_read;
@@ -62,12 +64,13 @@ struct lpfc_nvmet_tgtport {
        atomic_t xmt_fcp_rsp_drop;
 
 
-       /* Stats counters - lpfc_nvmet_unsol_issue_abort */
+       /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+       atomic_t xmt_fcp_abort;
+       atomic_t xmt_fcp_abort_cmpl;
+       atomic_t xmt_abort_sol;
+       atomic_t xmt_abort_unsol;
        atomic_t xmt_abort_rsp;
        atomic_t xmt_abort_rsp_error;
-
-       /* Stats counters - lpfc_nvmet_xmt_abort_cmp */
-       atomic_t xmt_abort_cmpl;
 };
 
 struct lpfc_nvmet_rcv_ctx {
@@ -103,6 +106,7 @@ struct lpfc_nvmet_rcv_ctx {
 #define LPFC_NVMET_CTX_RLS             0x8  /* ctx free requested */
 #define LPFC_NVMET_ABTS_RCV            0x10  /* ABTS received on exchange */
        struct rqb_dmabuf *rqb_buffer;
+       struct lpfc_nvmet_ctxbuf *ctxbuf;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint64_t ts_isr_cmd;
index 2a4fc00dfa9bdc7dbe42d5bed00935abd0a9e398..d6b184839bc2ff951233ee8fbcf477d6b133206f 100644 (file)
@@ -74,6 +74,8 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
+static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
+                                         struct hbq_dmabuf *dmabuf);
 static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_cqe *);
 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
@@ -479,22 +481,23 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        put_index = hq->host_index;
-       temp_hrqe = hq->qe[hq->host_index].rqe;
+       temp_hrqe = hq->qe[put_index].rqe;
        temp_drqe = dq->qe[dq->host_index].rqe;
 
        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
-       if (hq->host_index != dq->host_index)
+       if (put_index != dq->host_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
-       if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+       if (((put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
 
        /* Update the host index to point to the next slot */
-       hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+       hq->host_index = ((put_index + 1) % hq->entry_count);
        dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+       hq->RQ_buf_posted++;
 
        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->entry_repost)) {
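
The fix here is to snapshot host_index into put_index once, so the full-queue check, the copy and the index update all use the same value even if the field changes underneath. A standalone model of the ring logic (fields mirror the queue structure above):

    #include <stdbool.h>
    #include <stdint.h>

    struct rq_sketch {
            uint32_t host_index;    /* next slot software writes */
            uint32_t hba_index;     /* next slot hardware reads  */
            uint32_t entry_count;
            uint32_t entry_repost;
    };

    static int rq_put_sketch(struct rq_sketch *q, bool *ring_doorbell)
    {
            uint32_t put_index = q->host_index;     /* single snapshot */

            /* Queue is full if advancing would collide with the HBA. */
            if (((put_index + 1) % q->entry_count) == q->hba_index)
                    return -1;                      /* mirrors -EBUSY */

            /* ... the real code copies hrqe/drqe into qe[put_index] ... */

            q->host_index = (put_index + 1) % q->entry_count;

            /* Doorbells are batched: ring once per entry_repost slots. */
            *ring_doorbell = (q->host_index % q->entry_repost) == 0;
            return 0;
    }
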
@@ -5906,7 +5909,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
                bf_set(lpfc_mbx_set_feature_mds,
                       &mbox->u.mqe.un.set_feature, 1);
                bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
-                      &mbox->u.mqe.un.set_feature, 0);
+                      &mbox->u.mqe.un.set_feature, 1);
                mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
                mbox->u.mqe.un.set_feature.param_len = 8;
                break;
@@ -6512,6 +6515,50 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
                 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
 }
 
+int
+lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+                   struct lpfc_queue *drq, int count, int idx)
+{
+       int rc, i;
+       struct lpfc_rqe hrqe;
+       struct lpfc_rqe drqe;
+       struct lpfc_rqb *rqbp;
+       struct rqb_dmabuf *rqb_buffer;
+       LIST_HEAD(rqb_buf_list);
+
+       rqbp = hrq->rqbp;
+       for (i = 0; i < count; i++) {
+               /* If RQ is already full, don't bother */

+               if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+                       break;
+               rqb_buffer = rqbp->rqb_alloc_buffer(phba);
+               if (!rqb_buffer)
+                       break;
+               rqb_buffer->hrq = hrq;
+               rqb_buffer->drq = drq;
+               rqb_buffer->idx = idx;
+               list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
+       }
+       while (!list_empty(&rqb_buf_list)) {
+               list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
+                                hbuf.list);
+
+               hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
+               hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
+               drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
+               drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
+               rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
+               if (rc < 0) {
+                       rqbp->rqb_free_buffer(phba, rqb_buffer);
+               } else {
+                       list_add_tail(&rqb_buffer->hbuf.list,
+                                     &rqbp->rqb_buffer_list);
+                       rqbp->buffer_count++;
+               }
+       }
+       return 1;
+}
+
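
lpfc_post_rq_buffer() above has a two-phase shape worth calling out: buffers are first collected on a private local list, then drained one by one, so a failed post costs only that single buffer while successes move onto the shared tracking list, and nothing is left half-attached if allocation stops early. A hedged user-space sketch of the same shape; the helpers are stand-ins, not driver functions:

  #include <stdio.h>
  #include <stdlib.h>

  struct buf { struct buf *next; };

  static struct buf *posted_list;   /* shared state, touched only in phase 2 */

  static struct buf *alloc_buf(void) { return calloc(1, sizeof(struct buf)); }
  static int post_buf(struct buf *b) { (void)b; return 0; } /* hw accepted */

  static int post_buffers(int count)
  {
          struct buf *head = NULL, *b;
          int posted = 0;

          /* phase 1: allocate onto a private list; stop early on failure */
          while (count-- > 0) {
                  b = alloc_buf();
                  if (!b)
                          break;
                  b->next = head;
                  head = b;
          }

          /* phase 2: drain; a posting failure costs only that one buffer */
          while ((b = head) != NULL) {
                  head = b->next;
                  if (post_buf(b) < 0) {
                          free(b);
                  } else {
                          b->next = posted_list;
                          posted_list = b;
                          posted++;
                  }
          }
          return posted;
  }

  int main(void)
  {
          printf("posted %d buffers\n", post_buffers(8));
          return 0;
  }
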
 /**
  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  * @phba: Pointer to HBA context object.
@@ -6524,7 +6571,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 int
 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 {
-       int rc, i;
+       int rc, i, cnt;
        LPFC_MBOXQ_t *mboxq;
        struct lpfc_mqe *mqe;
        uint8_t *vpd;
@@ -6875,6 +6922,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                        goto out_destroy_queue;
                }
                phba->sli4_hba.nvmet_xri_cnt = rc;
+
+               cnt = phba->cfg_iocb_cnt * 1024;
+               /* We need 1 iocbq for every SGL, for IO processing */
+               cnt += phba->sli4_hba.nvmet_xri_cnt;
+               /* Initialize and populate the iocb list per host */
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2821 initialize iocb list %d total %d\n",
+                               phba->cfg_iocb_cnt, cnt);
+               rc = lpfc_init_iocb_list(phba, cnt);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "1413 Failed to init iocb list.\n");
+                       goto out_destroy_queue;
+               }
+
                lpfc_nvmet_create_targetport(phba);
        } else {
                /* update host scsi xri-sgl sizes and mappings */
@@ -6894,28 +6956,34 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                                        "and mapping: %d\n", rc);
                        goto out_destroy_queue;
                }
+
+               cnt = phba->cfg_iocb_cnt * 1024;
+               /* Initialize and populate the iocb list per host */
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2820 initialize iocb list %d total %d\n",
+                               phba->cfg_iocb_cnt, cnt);
+               rc = lpfc_init_iocb_list(phba, cnt);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "6301 Failed to init iocb list.\n");
+                       goto out_destroy_queue;
+               }
        }
 
        if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
-
                /* Post initial buffers to all RQs created */
                for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
                        rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
                        INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
                        rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
                        rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
-                       rqbp->entry_count = 256;
+                       rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
                        rqbp->buffer_count = 0;
 
-                       /* Divide by 4 and round down to multiple of 16 */
-                       rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8;
-                       phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc;
-                       phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc;
-
                        lpfc_post_rq_buffer(
                                phba, phba->sli4_hba.nvmet_mrq_hdr[i],
                                phba->sli4_hba.nvmet_mrq_data[i],
-                               phba->cfg_nvmet_mrq_post);
+                               LPFC_NVMET_RQE_DEF_COUNT, i);
                }
        }
 
@@ -7082,6 +7150,7 @@ out_unset_queue:
        /* Unset all the queues set up in this routine when error out */
        lpfc_sli4_queue_unset(phba);
 out_destroy_queue:
+       lpfc_free_iocb_list(phba);
        lpfc_sli4_queue_destroy(phba);
 out_stop_timers:
        lpfc_stop_hba_timers(phba);
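
Adding lpfc_free_iocb_list() at out_destroy_queue matters because the iocb list is now allocated inside lpfc_sli4_hba_setup() (the hunks above); the error path has to unwind it, and the labels run in reverse order of acquisition, standard kernel unwind style. A generic, self-contained sketch of the idiom with made-up step names:

  #include <stdio.h>

  static int setup_a(void) { return 0; }
  static int setup_b(void) { return 0; }
  static int setup_c(void) { return -1; }   /* pretend step C fails */
  static void undo_a(void) { puts("undo A"); }
  static void undo_b(void) { puts("undo B"); }

  static int bring_up(void)
  {
          int rc;

          rc = setup_a();
          if (rc)
                  goto out;
          rc = setup_b();
          if (rc)
                  goto out_undo_a;
          rc = setup_c();
          if (rc)
                  goto out_undo_b;
          return 0;

  out_undo_b:           /* labels run in reverse order of acquisition */
          undo_b();
  out_undo_a:
          undo_a();
  out:
          return rc;
  }

  int main(void) { return bring_up() ? 1 : 0; }
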
@@ -8621,8 +8690,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                memset(wqe, 0, sizeof(union lpfc_wqe128));
        /* Some of the fields are in the right position already */
        memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
-       wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
-       wqe->generic.wqe_com.word10 = 0;
+       if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
+               /* The ct field has moved so reset */
+               wqe->generic.wqe_com.word7 = 0;
+               wqe->generic.wqe_com.word10 = 0;
+       }
 
        abort_tag = (uint32_t) iocbq->iotag;
        xritag = iocbq->sli4_xritag;
@@ -9116,6 +9188,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                }
 
                break;
+       case CMD_SEND_FRAME:
+               bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+               bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+               return 0;
        case CMD_XRI_ABORTED_CX:
        case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
        case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -12788,6 +12864,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
        struct fc_frame_header *fc_hdr;
        struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
        struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+       struct lpfc_nvmet_tgtport *tgtp;
        struct hbq_dmabuf *dma_buf;
        uint32_t status, rq_id;
        unsigned long iflags;
@@ -12808,7 +12885,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "2537 Receive Frame Truncated!!\n");
-               hrq->RQ_buf_trunc++;
        case FC_STATUS_RQ_SUCCESS:
                lpfc_sli4_rq_release(hrq, drq);
                spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12819,6 +12895,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
                        goto out;
                }
                hrq->RQ_rcv_buf++;
+               hrq->RQ_buf_posted--;
                memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 
                /* If a NVME LS event (type 0x28), treat it as Fast path */
@@ -12832,8 +12909,21 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                workposted = true;
                break;
-       case FC_STATUS_INSUFF_BUF_NEED_BUF:
        case FC_STATUS_INSUFF_BUF_FRM_DISC:
+               if (phba->nvmet_support) {
+                       tgtp = phba->targetport->private;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
+                                       "6402 RQE Error x%x, posted %d err_cnt "
+                                       "%d: %x %x %x\n",
+                                       status, hrq->RQ_buf_posted,
+                                       hrq->RQ_no_posted_buf,
+                                       atomic_read(&tgtp->rcv_fcp_cmd_in),
+                                       atomic_read(&tgtp->rcv_fcp_cmd_out),
+                                       atomic_read(&tgtp->xmt_fcp_release));
+               }
+               /* fallthrough */
+
+       case FC_STATUS_INSUFF_BUF_NEED_BUF:
                hrq->RQ_no_posted_buf++;
                /* Post more buffers if possible */
                spin_lock_irqsave(&phba->hbalock, iflags);
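
This hunk, like its twin in lpfc_sli4_nvmet_handle_rcqe() below, relies on deliberate switch fallthrough: the _FRM_DISC case logs the NVMe target counters, then drops into _NEED_BUF for the shared bookkeeping, with the /* fallthrough */ comment marking the drop as intentional for compilers that warn about implicit fallthrough. A compact model of that control flow:

  #include <stdio.h>

  enum rq_status { RQ_OK, RQ_DISCARDED, RQ_NEED_BUF };

  static int no_posted_buf;

  static void handle_status(enum rq_status status, int nvmet)
  {
          switch (status) {
          case RQ_DISCARDED:
                  if (nvmet)
                          fprintf(stderr, "RQE error %d, frame discarded\n",
                                  status);
                  /* fallthrough */
          case RQ_NEED_BUF:
                  no_posted_buf++;   /* bookkeeping shared by both cases */
                  break;
          case RQ_OK:
                  break;
          }
  }

  int main(void)
  {
          handle_status(RQ_DISCARDED, 1);
          handle_status(RQ_NEED_BUF, 0);
          printf("no_posted_buf = %d\n", no_posted_buf);
          return 0;
  }
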
@@ -12951,7 +13041,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
                while ((cqe = lpfc_sli4_cq_get(cq))) {
                        workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
                        if (!(++ecount % cq->entry_repost))
-                               lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+                               break;
                        cq->CQ_mbox++;
                }
                break;
@@ -12965,7 +13055,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
                                workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
                                                                      cqe);
                        if (!(++ecount % cq->entry_repost))
-                               lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+                               break;
                }
 
                /* Track the max number of CQEs processed in 1 EQ */
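
The repeated substitutions of lpfc_sli4_cq_release(..., LPFC_QUEUE_NOARM) with break in this patch read as a work-budget pattern: instead of releasing entries mid-loop and polling indefinitely, each pass now consumes at most entry_repost entries, breaks out, and lets the caller release and re-arm the queue once on the way out, bounding the time spent per invocation. Schematically, with stand-in helpers:

  #include <stdbool.h>
  #include <stdio.h>

  #define BUDGET 64

  static int pending = 150;          /* pretend 150 CQEs are queued */

  static bool get_entry(void)
  {
          if (pending <= 0)
                  return false;
          pending--;
          return true;
  }

  static void handle_entry(void) { }

  static void rearm_queue(int consumed)
  {
          printf("released %d entries, re-armed\n", consumed);
  }

  static void poll_queue(void)
  {
          int ecount = 0;

          while (get_entry()) {
                  handle_entry();
                  if (!(++ecount % BUDGET))
                          break;     /* budget spent: stop, don't re-arm inline */
          }
          rearm_queue(ecount);       /* single release on the way out */
  }

  int main(void)
  {
          poll_queue();              /* handles at most BUDGET entries */
          return 0;
  }
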
@@ -13135,6 +13225,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
        struct lpfc_queue *drq;
        struct rqb_dmabuf *dma_buf;
        struct fc_frame_header *fc_hdr;
+       struct lpfc_nvmet_tgtport *tgtp;
        uint32_t status, rq_id;
        unsigned long iflags;
        uint32_t fctl, idx;
@@ -13165,8 +13256,6 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "6126 Receive Frame Truncated!!\n");
-               hrq->RQ_buf_trunc++;
-               break;
        case FC_STATUS_RQ_SUCCESS:
                lpfc_sli4_rq_release(hrq, drq);
                spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13178,6 +13267,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                }
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                hrq->RQ_rcv_buf++;
+               hrq->RQ_buf_posted--;
                fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
 
                /* Just some basic sanity checks on FCP Command frame */
@@ -13200,14 +13290,23 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 drop:
                lpfc_in_buf_free(phba, &dma_buf->dbuf);
                break;
-       case FC_STATUS_INSUFF_BUF_NEED_BUF:
        case FC_STATUS_INSUFF_BUF_FRM_DISC:
+               if (phba->nvmet_support) {
+                       tgtp = phba->targetport->private;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
+                                       "6401 RQE Error x%x, posted %d err_cnt "
+                                       "%d: %x %x %x\n",
+                                       status, hrq->RQ_buf_posted,
+                                       hrq->RQ_no_posted_buf,
+                                       atomic_read(&tgtp->rcv_fcp_cmd_in),
+                                       atomic_read(&tgtp->rcv_fcp_cmd_out),
+                                       atomic_read(&tgtp->xmt_fcp_release));
+               }
+               /* fallthrough */
+
+       case FC_STATUS_INSUFF_BUF_NEED_BUF:
                hrq->RQ_no_posted_buf++;
                /* Post more buffers if possible */
-               spin_lock_irqsave(&phba->hbalock, iflags);
-               phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
-               spin_unlock_irqrestore(&phba->hbalock, iflags);
-               workposted = true;
                break;
        }
 out:
@@ -13361,7 +13460,7 @@ process_cq:
        while ((cqe = lpfc_sli4_cq_get(cq))) {
                workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
                if (!(++ecount % cq->entry_repost))
-                       lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+                       break;
        }
 
        /* Track the max number of CQEs processed in 1 EQ */
@@ -13452,7 +13551,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
        while ((cqe = lpfc_sli4_cq_get(cq))) {
                workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
                if (!(++ecount % cq->entry_repost))
-                       lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+                       break;
        }
 
        /* Track the max number of CQEs processed in 1 EQ */
@@ -13534,7 +13633,7 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
        while ((eqe = lpfc_sli4_eq_get(eq))) {
                lpfc_sli4_fof_handle_eqe(phba, eqe);
                if (!(++ecount % eq->entry_repost))
-                       lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+                       break;
                eq->EQ_processed++;
        }
 
@@ -13651,7 +13750,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 
                lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
                if (!(++ecount % fpeq->entry_repost))
-                       lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+                       break;
                fpeq->EQ_processed++;
        }
 
@@ -13832,17 +13931,10 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
        }
        queue->entry_size = entry_size;
        queue->entry_count = entry_count;
-
-       /*
-        * entry_repost is calculated based on the number of entries in the
-        * queue. This works out except for RQs. If buffers are NOT initially
-        * posted for every RQE, entry_repost should be adjusted accordingly.
-        */
-       queue->entry_repost = (entry_count >> 3);
-       if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
-               queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
        queue->phba = phba;
 
+       /* entry_repost will be set during q creation */
+
        return queue;
 out_fail:
        lpfc_sli4_queue_free(queue);
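
With the size-derived computation gone from lpfc_sli4_queue_alloc(), entry_repost is now a fixed per-type constant assigned at create time (see the LPFC_EQ_REPOST/LPFC_CQ_REPOST/... defines in the lpfc_sli4.h hunk further down), decoupling the doorbell batching threshold from queue depth. The mapping is trivial; in sketch form:

  #include <stdio.h>

  enum q_type { Q_EQ, Q_MQ, Q_CQ, Q_RQ };

  #define LPFC_EQ_REPOST  8
  #define LPFC_MQ_REPOST  8
  #define LPFC_CQ_REPOST  64
  #define LPFC_RQ_REPOST  64

  /* chosen when the queue is created, not derived from entry_count */
  static unsigned int repost_for(enum q_type t)
  {
          switch (t) {
          case Q_EQ: return LPFC_EQ_REPOST;
          case Q_MQ: return LPFC_MQ_REPOST;
          case Q_CQ: return LPFC_CQ_REPOST;
          case Q_RQ: return LPFC_RQ_REPOST;
          }
          return LPFC_EQ_REPOST;
  }

  int main(void)
  {
          printf("CQ repost threshold: %u\n", repost_for(Q_CQ));
          return 0;
  }
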
@@ -14073,6 +14165,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
                status = -ENXIO;
        eq->host_index = 0;
        eq->hba_index = 0;
+       eq->entry_repost = LPFC_EQ_REPOST;
 
        mempool_free(mbox, phba->mbox_mem_pool);
        return status;
@@ -14146,9 +14239,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0361 Unsupported CQ count: "
-                               "entry cnt %d sz %d pg cnt %d repost %d\n",
+                               "entry cnt %d sz %d pg cnt %d\n",
                                cq->entry_count, cq->entry_size,
-                               cq->page_count, cq->entry_repost);
+                               cq->page_count);
                if (cq->entry_count < 256) {
                        status = -EINVAL;
                        goto out;
@@ -14201,6 +14294,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
        cq->assoc_qid = eq->queue_id;
        cq->host_index = 0;
        cq->hba_index = 0;
+       cq->entry_repost = LPFC_CQ_REPOST;
 
 out:
        mempool_free(mbox, phba->mbox_mem_pool);
@@ -14392,6 +14486,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
                cq->assoc_qid = eq->queue_id;
                cq->host_index = 0;
                cq->hba_index = 0;
+               cq->entry_repost = LPFC_CQ_REPOST;
 
                rc = 0;
                list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14640,6 +14735,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
        mq->subtype = subtype;
        mq->host_index = 0;
        mq->hba_index = 0;
+       mq->entry_repost = LPFC_MQ_REPOST;
 
        /* link the mq onto the parent cq child list */
        list_add_tail(&mq->list, &cq->child_list);
@@ -14864,34 +14960,6 @@ out:
        return status;
 }
 
-/**
- * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
- * @phba: HBA structure that indicates port to create a queue on.
- * @rq:   The queue structure to use for the receive queue.
- * @qno:  The associated HBQ number
- *
- *
- * For SLI4 we need to adjust the RQ repost value based on
- * the number of buffers that are initially posted to the RQ.
- */
-void
-lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
-{
-       uint32_t cnt;
-
-       /* sanity check on queue memory */
-       if (!rq)
-               return;
-       cnt = lpfc_hbq_defs[qno]->entry_count;
-
-       /* Recalc repost for RQs based on buffers initially posted */
-       cnt = (cnt >> 3);
-       if (cnt < LPFC_QUEUE_MIN_REPOST)
-               cnt = LPFC_QUEUE_MIN_REPOST;
-
-       rq->entry_repost = cnt;
-}
-
 /**
  * lpfc_rq_create - Create a Receive Queue on the HBA
  * @phba: HBA structure that indicates port to create a queue on.
@@ -15077,6 +15145,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        hrq->subtype = subtype;
        hrq->host_index = 0;
        hrq->hba_index = 0;
+       hrq->entry_repost = LPFC_RQ_REPOST;
 
        /* now create the data queue */
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -15087,7 +15156,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
                bf_set(lpfc_rq_context_rqe_count_1,
                       &rq_create->u.request.context, hrq->entry_count);
-               rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
+               if (subtype == LPFC_NVMET)
+                       rq_create->u.request.context.buffer_size =
+                               LPFC_NVMET_DATA_BUF_SIZE;
+               else
+                       rq_create->u.request.context.buffer_size =
+                               LPFC_DATA_BUF_SIZE;
                bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
                       LPFC_RQE_SIZE_8);
                bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
@@ -15124,8 +15198,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                               LPFC_RQ_RING_SIZE_4096);
                        break;
                }
-               bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
-                      LPFC_DATA_BUF_SIZE);
+               if (subtype == LPFC_NVMET)
+                       bf_set(lpfc_rq_context_buf_size,
+                              &rq_create->u.request.context,
+                              LPFC_NVMET_DATA_BUF_SIZE);
+               else
+                       bf_set(lpfc_rq_context_buf_size,
+                              &rq_create->u.request.context,
+                              LPFC_DATA_BUF_SIZE);
        }
        bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
               cq->queue_id);
@@ -15158,6 +15238,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        drq->subtype = subtype;
        drq->host_index = 0;
        drq->hba_index = 0;
+       drq->entry_repost = LPFC_RQ_REPOST;
 
        /* link the header and data RQs onto the parent cq child list */
        list_add_tail(&hrq->list, &cq->child_list);
@@ -15270,7 +15351,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
                               cq->queue_id);
                        bf_set(lpfc_rq_context_data_size,
                               &rq_create->u.request.context,
-                              LPFC_DATA_BUF_SIZE);
+                              LPFC_NVMET_DATA_BUF_SIZE);
                        bf_set(lpfc_rq_context_hdr_size,
                               &rq_create->u.request.context,
                               LPFC_HDR_BUF_SIZE);
@@ -15315,6 +15396,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
                hrq->subtype = subtype;
                hrq->host_index = 0;
                hrq->hba_index = 0;
+               hrq->entry_repost = LPFC_RQ_REPOST;
 
                drq->db_format = LPFC_DB_RING_FORMAT;
                drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -15323,6 +15405,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
                drq->subtype = subtype;
                drq->host_index = 0;
                drq->hba_index = 0;
+               drq->entry_repost = LPFC_RQ_REPOST;
 
                list_add_tail(&hrq->list, &cq->child_list);
                list_add_tail(&drq->list, &cq->child_list);
@@ -16063,6 +16146,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        struct fc_vft_header *fc_vft_hdr;
        uint32_t *header = (uint32_t *) fc_hdr;
 
+#define FC_RCTL_MDS_DIAGS      0xF4
+
        switch (fc_hdr->fh_r_ctl) {
        case FC_RCTL_DD_UNCAT:          /* uncategorized information */
        case FC_RCTL_DD_SOL_DATA:       /* solicited data */
@@ -16090,6 +16175,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        case FC_RCTL_F_BSY:     /* fabric busy to data frame */
        case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
        case FC_RCTL_LCR:       /* link credit reset */
+       case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
        case FC_RCTL_END:       /* end */
                break;
        case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
@@ -16099,12 +16185,16 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        default:
                goto drop;
        }
+
+#define FC_TYPE_VENDOR_UNIQUE  0xFF
+
        switch (fc_hdr->fh_type) {
        case FC_TYPE_BLS:
        case FC_TYPE_ELS:
        case FC_TYPE_FCP:
        case FC_TYPE_CT:
        case FC_TYPE_NVME:
+       case FC_TYPE_VENDOR_UNIQUE:
                break;
        case FC_TYPE_IP:
        case FC_TYPE_ILS:
@@ -16115,12 +16205,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
                        "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+                       (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" :
                        lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
-                       lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type,
-                       be32_to_cpu(header[0]), be32_to_cpu(header[1]),
-                       be32_to_cpu(header[2]), be32_to_cpu(header[3]),
-                       be32_to_cpu(header[4]), be32_to_cpu(header[5]),
-                       be32_to_cpu(header[6]));
+                       (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ?
+                       "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type],
+                       fc_hdr->fh_type, be32_to_cpu(header[0]),
+                       be32_to_cpu(header[1]), be32_to_cpu(header[2]),
+                       be32_to_cpu(header[3]), be32_to_cpu(header[4]),
+                       be32_to_cpu(header[5]), be32_to_cpu(header[6]));
        return 0;
 drop:
        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -16926,6 +17018,96 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
        lpfc_sli_release_iocbq(phba, iocbq);
 }
 
+static void
+lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+                           struct lpfc_iocbq *rspiocb)
+{
+       struct lpfc_dmabuf *pcmd = cmdiocb->context2;
+
+       if (pcmd && pcmd->virt)
+               pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+       kfree(pcmd);
+       lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+static void
+lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
+                             struct hbq_dmabuf *dmabuf)
+{
+       struct fc_frame_header *fc_hdr;
+       struct lpfc_hba *phba = vport->phba;
+       struct lpfc_iocbq *iocbq = NULL;
+       union  lpfc_wqe *wqe;
+       struct lpfc_dmabuf *pcmd = NULL;
+       uint32_t frame_len;
+       int rc;
+
+       fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+       frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
+
+       /* Send the received frame back */
+       iocbq = lpfc_sli_get_iocbq(phba);
+       if (!iocbq)
+               goto exit;
+
+       /* Allocate buffer for command payload */
+       pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (pcmd)
+               pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+                                           &pcmd->phys);
+       if (!pcmd || !pcmd->virt)
+               goto exit;
+
+       INIT_LIST_HEAD(&pcmd->list);
+
+       /* copyin the payload */
+       memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
+
+       /* fill in BDE's for command */
+       iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
+       iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
+       iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+       iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
+
+       iocbq->context2 = pcmd;
+       iocbq->vport = vport;
+       iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
+       iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+
+       /*
+        * Setup rest of the iocb as though it were a WQE
+        * Build the SEND_FRAME WQE
+        */
+       wqe = (union lpfc_wqe *)&iocbq->iocb;
+
+       wqe->send_frame.frame_len = frame_len;
+       wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
+       wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
+       wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
+       wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
+       wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
+       wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
+
+       iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
+       iocbq->iocb.ulpLe = 1;
+       iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
+       if (rc == IOCB_ERROR)
+               goto exit;
+
+       lpfc_in_buf_free(phba, &dmabuf->dbuf);
+       return;
+
+exit:
+       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                       "2023 Unable to process MDS loopback frame\n");
+       if (pcmd && pcmd->virt)
+               pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+       kfree(pcmd);
+       lpfc_sli_release_iocbq(phba, iocbq);
+       lpfc_in_buf_free(phba, &dmabuf->dbuf);
+}
+
 /**
  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
  * @phba: Pointer to HBA context object.
@@ -16964,6 +17146,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
                fcfi = bf_get(lpfc_rcqe_fcf_id,
                              &dmabuf->cq_event.cqe.rcqe_cmpl);
 
+       if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
+               vport = phba->pport;
+               /* Handle MDS Loopback frames */
+               lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+               return;
+       }
+
        /* d_id this frame is directed to */
        did = sli4_did_from_fc_hdr(fc_hdr);
 
@@ -17137,6 +17326,14 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
                                "status x%x add_status x%x, mbx status x%x\n",
                                shdr_status, shdr_add_status, rc);
                rc = -ENXIO;
+       } else {
+               /*
+                * The next_rpi stores the next logical module-64 rpi value used
+                * to post physical rpis in subsequent rpi postings.
+                */
+               spin_lock_irq(&phba->hbalock);
+               phba->sli4_hba.next_rpi = rpi_page->next_rpi;
+               spin_unlock_irq(&phba->hbalock);
        }
        return rc;
 }
@@ -18717,7 +18914,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 
                spin_lock_irqsave(&pring->ring_lock, iflags);
                ctxp = pwqe->context2;
-               sglq = ctxp->rqb_buffer->sglq;
+               sglq = ctxp->ctxbuf->sglq;
                if (pwqe->sli4_xritag ==  NO_XRI) {
                        pwqe->sli4_lxritag = sglq->sli4_lxritag;
                        pwqe->sli4_xritag = sglq->sli4_xritag;
index da46471337c8a7e3b6c38164bc4afe1a4f0087c2..cf863db27700a2400463bcc65777ec15f9806da4 100644 (file)
@@ -24,7 +24,6 @@
 #define LPFC_XRI_EXCH_BUSY_WAIT_TMO            10000
 #define LPFC_XRI_EXCH_BUSY_WAIT_T1             10
 #define LPFC_XRI_EXCH_BUSY_WAIT_T2              30000
-#define LPFC_RELEASE_NOTIFICATION_INTERVAL     32
 #define LPFC_RPI_LOW_WATER_MARK                        10
 
 #define LPFC_UNREG_FCF                          1
@@ -155,7 +154,11 @@ struct lpfc_queue {
        uint32_t entry_count;   /* Number of entries to support on the queue */
        uint32_t entry_size;    /* Size of each queue entry. */
        uint32_t entry_repost;  /* Count of entries before doorbell is rung */
-#define LPFC_QUEUE_MIN_REPOST  8
+#define LPFC_EQ_REPOST         8
+#define LPFC_MQ_REPOST         8
+#define LPFC_CQ_REPOST         64
+#define LPFC_RQ_REPOST         64
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL     32  /* For WQs */
        uint32_t queue_id;      /* Queue ID assigned by the hardware */
        uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
        uint32_t page_count;    /* Number of pages allocated for this queue */
@@ -195,7 +198,7 @@ struct lpfc_queue {
 /* defines for RQ stats */
 #define        RQ_no_posted_buf        q_cnt_1
 #define        RQ_no_buf_found         q_cnt_2
-#define        RQ_buf_trunc            q_cnt_3
+#define        RQ_buf_posted           q_cnt_3
 #define        RQ_rcv_buf              q_cnt_4
 
        uint64_t isr_timestamp;
@@ -617,12 +620,17 @@ struct lpfc_sli4_hba {
        uint16_t scsi_xri_start;
        uint16_t els_xri_cnt;
        uint16_t nvmet_xri_cnt;
+       uint16_t nvmet_ctx_cnt;
+       uint16_t nvmet_io_wait_cnt;
+       uint16_t nvmet_io_wait_total;
        struct list_head lpfc_els_sgl_list;
        struct list_head lpfc_abts_els_sgl_list;
        struct list_head lpfc_nvmet_sgl_list;
        struct list_head lpfc_abts_nvmet_ctx_list;
        struct list_head lpfc_abts_scsi_buf_list;
        struct list_head lpfc_abts_nvme_buf_list;
+       struct list_head lpfc_nvmet_ctx_list;
+       struct list_head lpfc_nvmet_io_wait_list;
        struct lpfc_sglq **lpfc_sglq_active_list;
        struct list_head lpfc_rpi_hdr_list;
        unsigned long *rpi_bmask;
@@ -654,6 +662,7 @@ struct lpfc_sli4_hba {
        spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
        spinlock_t sgl_list_lock; /* list of aborted els IOs */
        spinlock_t nvmet_io_lock;
+       spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
        uint32_t physical_port;
 
        /* CPU to vector mapping information */
@@ -661,8 +670,6 @@ struct lpfc_sli4_hba {
        uint16_t num_online_cpu;
        uint16_t num_present_cpu;
        uint16_t curr_disp_cpu;
-
-       uint16_t nvmet_mrq_post_idx;
 };
 
 enum lpfc_sge_type {
@@ -698,6 +705,7 @@ struct lpfc_rpi_hdr {
        struct lpfc_dmabuf *dmabuf;
        uint32_t page_count;
        uint32_t start_rpi;
+       uint16_t next_rpi;
 };
 
 struct lpfc_rsrc_blks {
@@ -762,7 +770,6 @@ int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
 int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
                        struct lpfc_queue **drqp, struct lpfc_queue **cqp,
                        uint32_t subtype);
-void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
 int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
 int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
 int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
index 1c26dc67151b65e050b729e63a0871cb6846564f..c2653244221cb1b2cb987d962686daeed43094c3 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.2.0.12"
+#define LPFC_DRIVER_VERSION "11.2.0.14"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 5ca3e8c28a3f6af3b4faf1d82ec1ab6f37480127..32632c9b22766d59e223a256c430810a4fa33bec 100644 (file)
@@ -38,7 +38,7 @@ struct qedi_endpoint;
 #define QEDI_MAX_ISCSI_TASK            4096
 #define QEDI_MAX_TASK_NUM              0x0FFF
 #define QEDI_MAX_ISCSI_CONNS_PER_HBA   1024
-#define QEDI_ISCSI_MAX_BDS_PER_CMD     256     /* Firmware max BDs is 256 */
+#define QEDI_ISCSI_MAX_BDS_PER_CMD     255     /* Firmware max BDs is 255 */
 #define MAX_OUSTANDING_TASKS_PER_CON   1024
 
 #define QEDI_MAX_BD_LEN                0xffff
@@ -63,6 +63,7 @@ struct qedi_endpoint;
 #define QEDI_PAGE_MASK         (~((QEDI_PAGE_SIZE) - 1))
 
 #define QEDI_PAGE_SIZE         4096
+#define QEDI_HW_DMA_BOUNDARY   0xfff
 #define QEDI_PATH_HANDLE       0xFE0000000UL
 
 struct qedi_uio_ctrl {
index d6978cbc56f0586aa8a075191433184c50c93b01..8bc7ee1a8ca81626829329e80831ffa2d57b8c63 100644 (file)
@@ -1494,6 +1494,8 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
        tmf_hdr = (struct iscsi_tm *)mtask->hdr;
        qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
        ep = qedi_conn->ep;
+       if (!ep)
+               return -ENODEV;
 
        tid = qedi_get_task_idx(qedi);
        if (tid == -1)
index 3548d46f9b275825a9f76765966cf8c943762e88..87f0af358b33ae4563ba2ee83a1aaa4ce9ca7945 100644 (file)
@@ -59,6 +59,7 @@ struct scsi_host_template qedi_host_template = {
        .this_id = -1,
        .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
        .max_sectors = 0xffff,
+       .dma_boundary = QEDI_HW_DMA_BOUNDARY,
        .cmd_per_lun = 128,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = qedi_shost_attrs,
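
The new .dma_boundary = QEDI_HW_DMA_BOUNDARY (0xfff) tells the block layer that no DMA segment for this host may straddle a 4 KiB boundary, so the midlayer splits scatter-gather elements accordingly. The underlying test is plain mask arithmetic; a small illustration:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define HW_DMA_BOUNDARY 0xfffULL   /* 4 KiB - 1, as in the qedi hunk */

  /* true if [addr, addr+len) crosses a (boundary+1)-aligned border */
  static bool crosses_boundary(uint64_t addr, uint64_t len, uint64_t boundary)
  {
          if (!len)
                  return false;
          return (addr & ~boundary) != ((addr + len - 1) & ~boundary);
  }

  int main(void)
  {
          printf("%d\n", crosses_boundary(0x0ff0, 0x10, HW_DMA_BOUNDARY)); /* 0: fits      */
          printf("%d\n", crosses_boundary(0x0ff0, 0x20, HW_DMA_BOUNDARY)); /* 1: crosses   */
          return 0;
  }
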
@@ -1223,8 +1224,12 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 
        iscsi_cid = (u32)path_data->handle;
        qedi_ep = qedi->ep_tbl[iscsi_cid];
-       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+       if (!qedi_ep) {
+               ret = -EINVAL;
+               goto set_path_exit;
+       }
 
        if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
                QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
index 92775a8b74b1cdc068b8d8808b7bd9ccd5b212eb..09a294634bc7e8898a2d209a9a5cef3d50eb8f32 100644 (file)
@@ -151,6 +151,11 @@ static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
 
 static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
 {
+       if (udev->uctrl) {
+               free_page((unsigned long)udev->uctrl);
+               udev->uctrl = NULL;
+       }
+
        if (udev->ll2_ring) {
                free_page((unsigned long)udev->ll2_ring);
                udev->ll2_ring = NULL;
@@ -169,7 +174,6 @@ static void __qedi_free_uio(struct qedi_uio_dev *udev)
        __qedi_free_uio_rings(udev);
 
        pci_dev_put(udev->pdev);
-       kfree(udev->uctrl);
        kfree(udev);
 }
 
@@ -208,6 +212,11 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
        if (udev->ll2_ring || udev->ll2_buf)
                return rc;
 
+       /* Memory for control area.  */
+       udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!udev->uctrl)
+               return -ENOMEM;
+
        /* Allocating memory for LL2 ring  */
        udev->ll2_ring_size = QEDI_PAGE_SIZE;
        udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
@@ -237,7 +246,6 @@ exit_alloc_ring:
 static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
 {
        struct qedi_uio_dev *udev = NULL;
-       struct qedi_uio_ctrl *uctrl = NULL;
        int rc = 0;
 
        list_for_each_entry(udev, &qedi_udev_list, list) {
@@ -258,21 +266,14 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
                goto err_udev;
        }
 
-       uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
-       if (!uctrl) {
-               rc = -ENOMEM;
-               goto err_uctrl;
-       }
-
        udev->uio_dev = -1;
 
        udev->qedi = qedi;
        udev->pdev = qedi->pdev;
-       udev->uctrl = uctrl;
 
        rc = __qedi_alloc_uio_rings(udev);
        if (rc)
-               goto err_uio_rings;
+               goto err_uctrl;
 
        list_add(&udev->list, &qedi_udev_list);
 
@@ -283,8 +284,6 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
        udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
        return 0;
 
- err_uio_rings:
-       kfree(uctrl);
  err_uctrl:
        kfree(udev);
  err_udev:
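
Replacing kzalloc() with get_zeroed_page() for udev->uctrl, and freeing it in __qedi_free_uio_rings() next to the LL2 rings, is presumably driven by how UIO exposes these areas: they are mapped into user space a page at a time, which wants whole, page-aligned pages rather than slab memory. A user-space analogue of allocating a zeroed page intended for sharing by mapping:

  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
          long page = sysconf(_SC_PAGESIZE);

          /* page-aligned and zero-filled, like get_zeroed_page() */
          void *ctrl = mmap(NULL, (size_t)page, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          if (ctrl == MAP_FAILED) {
                  perror("mmap");
                  return 1;
          }

          /* ... share with another component, use, then tear down ... */
          munmap(ctrl, (size_t)page);   /* the analogue of free_page() */
          return 0;
  }
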
@@ -828,6 +827,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
        qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
        qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
        qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+       qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
+       qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
 
        for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
                if ((1 << log_page_size) == PAGE_SIZE)
index e31f1cc90b815b28a332d1a6915c82e19519a62f..99e16ac479e365d343840f44c9f7b0c50b4042cc 100644 (file)
@@ -1851,7 +1851,7 @@ static int scsi_mq_prep_fn(struct request *req)
 
        /* zero out the cmd, except for the embedded scsi_request */
        memset((char *)cmd + sizeof(cmd->req), 0,
-               sizeof(*cmd) - sizeof(cmd->req));
+               sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size);
 
        req->special = cmd;
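
The one-line scsi_mq_prep_fn() fix widens the zeroing window: under blk-mq the per-command allocation is struct scsi_cmnd immediately followed by hostt->cmd_size bytes of LLD-private data, and only the embedded scsi_request at the front must be preserved across reuse. The old memset stopped at sizeof(*cmd) and left stale driver-private bytes behind. A reduced model of the layout and the corrected zeroing:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct req_part { int tag; };            /* embedded, must survive  */
  struct cmd {
          struct req_part req;             /* first member            */
          int state, flags;                /* to be re-zeroed         */
          /* driver-private bytes follow in the same allocation */
  };

  int main(void)
  {
          size_t cmd_size = 32;            /* like hostt->cmd_size */
          struct cmd *cmd = malloc(sizeof(*cmd) + cmd_size);

          if (!cmd)
                  return 1;
          cmd->req.tag = 42;
          cmd->state = 7;

          /* zero everything after the embedded request, including the
           * driver-private trailer, the part the original memset missed */
          memset((char *)cmd + sizeof(cmd->req), 0,
                 sizeof(*cmd) - sizeof(cmd->req) + cmd_size);

          printf("tag=%d state=%d\n", cmd->req.tag, cmd->state); /* 42, 0 */
          free(cmd);
          return 0;
  }
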
 
index f9d1432d7cc589360354c9d3fdbe4c0967e23ee6..b6bb4e0ce0e3288f321ef1319621063f02d6c868 100644 (file)
@@ -827,21 +827,32 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
        u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+       int ret;
 
        if (!(rq->cmd_flags & REQ_NOUNMAP)) {
                switch (sdkp->zeroing_mode) {
                case SD_ZERO_WS16_UNMAP:
-                       return sd_setup_write_same16_cmnd(cmd, true);
+                       ret = sd_setup_write_same16_cmnd(cmd, true);
+                       goto out;
                case SD_ZERO_WS10_UNMAP:
-                       return sd_setup_write_same10_cmnd(cmd, true);
+                       ret = sd_setup_write_same10_cmnd(cmd, true);
+                       goto out;
                }
        }
 
        if (sdp->no_write_same)
                return BLKPREP_INVALID;
+
        if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
-               return sd_setup_write_same16_cmnd(cmd, false);
-       return sd_setup_write_same10_cmnd(cmd, false);
+               ret = sd_setup_write_same16_cmnd(cmd, false);
+       else
+               ret = sd_setup_write_same10_cmnd(cmd, false);
+
+out:
+       if (sd_is_zoned(sdkp) && ret == BLKPREP_OK)
+               return sd_zbc_write_lock_zone(cmd);
+
+       return ret;
 }
 
 static void sd_config_write_same(struct scsi_disk *sdkp)
@@ -948,6 +959,10 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
        rq->__data_len = sdp->sector_size;
        ret = scsi_init_io(cmd);
        rq->__data_len = nr_bytes;
+
+       if (sd_is_zoned(sdkp) && ret != BLKPREP_OK)
+               sd_zbc_write_unlock_zone(cmd);
+
        return ret;
 }
 
@@ -1567,17 +1582,21 @@ out:
        return retval;
 }
 
-static int sd_sync_cache(struct scsi_disk *sdkp)
+static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
 {
        int retries, res;
        struct scsi_device *sdp = sdkp->device;
        const int timeout = sdp->request_queue->rq_timeout
                * SD_FLUSH_TIMEOUT_MULTIPLIER;
-       struct scsi_sense_hdr sshdr;
+       struct scsi_sense_hdr my_sshdr;
 
        if (!scsi_device_online(sdp))
                return -ENODEV;
 
+       /* caller might not be interested in sense, but we need it */
+       if (!sshdr)
+               sshdr = &my_sshdr;
+
        for (retries = 3; retries > 0; --retries) {
                unsigned char cmd[10] = { 0 };
 
@@ -1586,7 +1605,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
                 * Leave the rest of the command zero to indicate
                 * flush everything.
                 */
-               res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+               res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
                                timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
                if (res == 0)
                        break;
@@ -1596,11 +1615,12 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
                sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
 
                if (driver_byte(res) & DRIVER_SENSE)
-                       sd_print_sense_hdr(sdkp, &sshdr);
+                       sd_print_sense_hdr(sdkp, sshdr);
+
                /* we need to evaluate the error return  */
-               if (scsi_sense_valid(&sshdr) &&
-                       (sshdr.asc == 0x3a ||   /* medium not present */
-                        sshdr.asc == 0x20))    /* invalid command */
+               if (scsi_sense_valid(sshdr) &&
+                       (sshdr->asc == 0x3a ||  /* medium not present */
+                        sshdr->asc == 0x20))   /* invalid command */
                                /* this is no error here */
                                return 0;
 
@@ -3444,7 +3464,7 @@ static void sd_shutdown(struct device *dev)
 
        if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
-               sd_sync_cache(sdkp);
+               sd_sync_cache(sdkp, NULL);
        }
 
        if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
@@ -3456,6 +3476,7 @@ static void sd_shutdown(struct device *dev)
 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 {
        struct scsi_disk *sdkp = dev_get_drvdata(dev);
+       struct scsi_sense_hdr sshdr;
        int ret = 0;
 
        if (!sdkp)      /* E.g.: runtime suspend following sd_remove() */
@@ -3463,12 +3484,23 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 
        if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
-               ret = sd_sync_cache(sdkp);
+               ret = sd_sync_cache(sdkp, &sshdr);
+
                if (ret) {
                        /* ignore OFFLINE device */
                        if (ret == -ENODEV)
-                               ret = 0;
-                       goto done;
+                               return 0;
+
+                       if (!scsi_sense_valid(&sshdr) ||
+                           sshdr.sense_key != ILLEGAL_REQUEST)
+                               return ret;
+
+                       /*
+                        * sshdr.sense_key == ILLEGAL_REQUEST means this drive
+                        * doesn't support sync. There's not much to do and
+                        * suspend shouldn't fail.
+                        */
+                        ret = 0;
                }
        }
 
@@ -3480,7 +3512,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
                        ret = 0;
        }
 
-done:
        return ret;
 }
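
Threading a scsi_sense_hdr out of sd_sync_cache() lets sd_suspend_common() tell a genuine flush failure apart from a drive that simply does not implement SYNCHRONIZE CACHE (sense key ILLEGAL_REQUEST), so suspend no longer fails on the latter. The local-fallback trick ("caller might not be interested in sense, but we need it") keeps every internal use of the pointer unconditional. In miniature:

  #include <stdio.h>

  struct sense { int key; };

  #define ILLEGAL_REQUEST 5

  static int do_flush(struct sense *out)
  {
          struct sense local;

          if (!out)
                  out = &local;     /* caller not interested; we still need one */

          out->key = ILLEGAL_REQUEST;   /* pretend the opcode was rejected */
          return -5;                    /* an -EIO-style failure            */
  }

  int main(void)
  {
          struct sense s;
          int ret = do_flush(&s);

          if (ret && s.key == ILLEGAL_REQUEST)
                  ret = 0;          /* not supported: don't fail suspend */

          (void)do_flush(NULL);     /* shutdown path: sense not needed */
          printf("suspend flush result: %d\n", ret);
          return 0;
  }
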
 
index 0a38ba01b7b4aac2151faf88421c8f2645dbd86a..82c33a6edbeaa7a00e6f7840ef4b5d8cdb8a084f 100644 (file)
@@ -2074,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
                if ((1 == resp->done) && (!resp->sg_io_owned) &&
                    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
                        resp->done = 2; /* guard against other readers */
-                       break;
+                       write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+                       return resp;
                }
        }
        write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-       return resp;
+       return NULL;
 }
 
 /* always adds to end of list */
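
The sg_get_rq_mark() change fixes a subtle list_for_each_entry() pitfall: when the loop runs to completion without a match, the cursor no longer points at a valid element (it is derived from the list head), so the old "break then return resp" could hand back a bogus request. Returning the match from inside the loop, while the write lock is still held, and NULL after an exhausted search removes both the bogus pointer and the window between claiming and returning. The shape, minus the locking:

  #include <stdio.h>

  struct req { int pack_id; int done; struct req *next; };

  /* Return the matching request, or NULL; never the exhausted cursor. */
  static struct req *find_done_request(struct req *head, int pack_id)
  {
          struct req *r;

          for (r = head; r; r = r->next) {
                  if (r->done == 1 &&
                      (pack_id == -1 || r->pack_id == pack_id)) {
                          r->done = 2;   /* claim before anyone else sees it */
                          return r;      /* found: return from inside the loop */
                  }
          }
          return NULL;                   /* exhausted: say so explicitly */
  }

  int main(void)
  {
          struct req b = { 7, 1, NULL }, a = { 3, 0, &b };

          printf("%p\n", (void *)find_done_request(&a, 7));   /* &b    */
          printf("%p\n", (void *)find_done_request(&a, 99));  /* (nil) */
          return 0;
  }
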
index abc7e87937cc3087617ca404dc73b01b39ff7822..ffe8d86088181c7da9c30ad58986418842f7016d 100644 (file)
@@ -7698,6 +7698,12 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
        ufshcd_add_spm_lvl_sysfs_nodes(hba);
 }
 
+static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
+{
+       device_remove_file(hba->dev, &hba->rpm_lvl_attr);
+       device_remove_file(hba->dev, &hba->spm_lvl_attr);
+}
+
 /**
  * ufshcd_shutdown - shutdown routine
  * @hba: per adapter instance
@@ -7735,6 +7741,7 @@ EXPORT_SYMBOL(ufshcd_shutdown);
  */
 void ufshcd_remove(struct ufs_hba *hba)
 {
+       ufshcd_remove_sysfs_nodes(hba);
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
index 8ea01904c0eae72b9273919c0214510072e89813..466517c7c8e618112dcdea3716631be9f277d400 100644 (file)
@@ -19,5 +19,3 @@ obj-$(CONFIG_VIDEO_AP1302)     += ap1302.o
 
 obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
 
-ccflags-y += -Werror
-
index 1d7f7ab94cac3b7ebf454b16a890dab9dc255314..6b13a3a66e49e3ee064887fa324685ddf05d1a0c 100644 (file)
@@ -4,5 +4,3 @@ imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o o
 
 ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
 obj-$(CONFIG_VIDEO_OV8858)     += ov8858_driver.o
-
-ccflags-y += -Werror
index fceb9e9b881bac608e81ae76ed50dad139e2432e..c9c0e1245858470147768987c12de5727e68ad22 100644 (file)
@@ -1,3 +1 @@
 obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
-
-ccflags-y += -Werror
index 3fa7c1c1479f330367b7ab01bf41d8c502a69349..f126a89a08e93ff6511b603960285bfbe3a3c4f7 100644 (file)
@@ -351,5 +351,5 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
 DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
 DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
 
-ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror
+ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
 
index 26a9bcd5ee6a40c391195ef13e205d8d936c8a54..0d8f81591bed076fa1f89f7cd27360776488f349 100644 (file)
@@ -3790,6 +3790,8 @@ int iscsi_target_tx_thread(void *arg)
 {
        int ret = 0;
        struct iscsi_conn *conn = arg;
+       bool conn_freed = false;
+
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
         * connection recovery / failure event can be triggered externally.
@@ -3815,12 +3817,14 @@ get_immediate:
                        goto transport_err;
 
                ret = iscsit_handle_response_queue(conn);
-               if (ret == 1)
+               if (ret == 1) {
                        goto get_immediate;
-               else if (ret == -ECONNRESET)
+               } else if (ret == -ECONNRESET) {
+                       conn_freed = true;
                        goto out;
-               else if (ret < 0)
+               } else if (ret < 0) {
                        goto transport_err;
+               }
        }
 
 transport_err:
@@ -3830,8 +3834,13 @@ transport_err:
         * responsible for cleaning up the early connection failure.
         */
        if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-               iscsit_take_action_for_connection_exit(conn);
+               iscsit_take_action_for_connection_exit(conn, &conn_freed);
 out:
+       if (!conn_freed) {
+               while (!kthread_should_stop()) {
+                       msleep(100);
+               }
+       }
        return 0;
 }
 
@@ -4004,6 +4013,7 @@ int iscsi_target_rx_thread(void *arg)
 {
        int rc;
        struct iscsi_conn *conn = arg;
+       bool conn_freed = false;
 
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
@@ -4016,7 +4026,7 @@ int iscsi_target_rx_thread(void *arg)
         */
        rc = wait_for_completion_interruptible(&conn->rx_login_comp);
        if (rc < 0 || iscsi_target_check_conn_state(conn))
-               return 0;
+               goto out;
 
        if (!conn->conn_transport->iscsit_get_rx_pdu)
                return 0;
@@ -4025,7 +4035,15 @@ int iscsi_target_rx_thread(void *arg)
 
        if (!signal_pending(current))
                atomic_set(&conn->transport_failed, 1);
-       iscsit_take_action_for_connection_exit(conn);
+       iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
+out:
+       if (!conn_freed) {
+               while (!kthread_should_stop()) {
+                       msleep(100);
+               }
+       }
+
        return 0;
 }
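
The conn_freed plumbing and these msleep() loops (the rx thread and the login thread below get the same treatment) close a shutdown race: these threads are created with kthread_run() and later reaped with kthread_stop(), which is only safe while the thread is still alive, so a thread whose work ends early must park until kthread_should_stop() turns true unless connection teardown already accounted for it. The core contract, as a minimal module-style sketch:

  #include <linux/module.h>
  #include <linux/kthread.h>
  #include <linux/delay.h>
  #include <linux/err.h>

  static struct task_struct *worker;

  static int worker_fn(void *arg)
  {
          /* ... do work until an error or shutdown event ... */

          /*
           * Do not simply return: the creator will call kthread_stop()
           * on this task, which requires the thread to still be alive.
           * Park here until the stop request arrives.
           */
          while (!kthread_should_stop())
                  msleep(100);
          return 0;
  }

  static int __init demo_init(void)
  {
          worker = kthread_run(worker_fn, NULL, "demo_worker");
          return PTR_ERR_OR_ZERO(worker);
  }

  static void __exit demo_exit(void)
  {
          kthread_stop(worker);   /* wakes the wait loop above and reaps */
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");
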
 
index 9a96e17bf7cd5f7448c880ffafcaa123730ebe71..7fe2aa73cff69e04f8df8d79e3af1c634fb5ca04 100644 (file)
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
        }
 }
 
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
 {
+       *conn_freed = false;
+
        spin_lock_bh(&conn->state_lock);
        if (atomic_read(&conn->connection_exit)) {
                spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
        if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
                spin_unlock_bh(&conn->state_lock);
                iscsit_close_connection(conn);
+               *conn_freed = true;
                return;
        }
 
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
        spin_unlock_bh(&conn->state_lock);
 
        iscsit_handle_connection_cleanup(conn);
+       *conn_freed = true;
 }
index 60e69e2af6eda981efb74e4ac313fb0d031093bd..3822d9cd12302071467af03d4920fda601fdd351 100644 (file)
@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
 
 #endif   /*** ISCSI_TARGET_ERL0_H ***/
index 66238477137bc46d35cade3167451e19f2d401ce..92b96b51d5068e77c45d85a5a4d16efc9ffa5a93 100644 (file)
@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg)
                        break;
        }
 
+       while (!kthread_should_stop()) {
+               msleep(100);
+       }
+
        return 0;
 }
index 7ccc9c1cbfd1a664fb4c37a5dd71f305e735f4bb..6f88b31242b0562b297e60fdf61552719ed7a97c 100644 (file)
@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
 
 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
 
-static bool iscsi_target_sk_state_check(struct sock *sk)
+static bool __iscsi_target_sk_check_close(struct sock *sk)
 {
        if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
-               pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+               pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
                        "returning FALSE\n");
-               return false;
+               return true;
        }
-       return true;
+       return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               read_lock_bh(&sk->sk_callback_lock);
+               state = (__iscsi_target_sk_check_close(sk) ||
+                        test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+               read_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               read_lock_bh(&sk->sk_callback_lock);
+               state = test_bit(flag, &conn->login_flags);
+               read_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               write_lock_bh(&sk->sk_callback_lock);
+               state = (__iscsi_target_sk_check_close(sk) ||
+                        test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+               if (!state)
+                       clear_bit(flag, &conn->login_flags);
+               write_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
 }
 
 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
        pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
                        conn, current->comm, current->pid);
+       /*
+        * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+        * before initial PDU processing in iscsi_target_start_negotiation()
+        * has completed, go ahead and retry until it's cleared.
+        *
+        * Otherwise if the TCP connection drops while this is occurring,
+        * iscsi_target_start_negotiation() will detect the failure, call
+        * cancel_delayed_work_sync(&conn->login_work), and cleanup the
+        * remaining iscsi connection resources from iscsi_np process context.
+        */
+       if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+               schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+               return;
+       }
 
        spin_lock(&tpg->tpg_state_lock);
        state = (tpg->tpg_state == TPG_STATE_ACTIVE);
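
The LOGIN_FLAGS_INITIAL_PDU check above defuses an ordering race between ->sk_data_ready() and the initial PDU processing in iscsi_target_start_negotiation(): if login_work fires before that setup completes, it re-queues itself with a short delay instead of racing the setup path. Self-requeueing delayed work in isolation looks like this hedged sketch (the flag and all names here are invented):

  #include <linux/module.h>
  #include <linux/workqueue.h>
  #include <linux/jiffies.h>
  #include <linux/bitops.h>

  static unsigned long flags;
  #define FLAG_SETUP_IN_PROGRESS 0

  static struct delayed_work login_work;

  static void login_work_fn(struct work_struct *work)
  {
          /* setup not finished yet: retry in 10ms instead of racing it */
          if (test_bit(FLAG_SETUP_IN_PROGRESS, &flags)) {
                  schedule_delayed_work(&login_work, msecs_to_jiffies(10));
                  return;
          }

          /* ... safe to process the login payload here ... */
  }

  static int __init demo_init(void)
  {
          set_bit(FLAG_SETUP_IN_PROGRESS, &flags);
          INIT_DELAYED_WORK(&login_work, login_work_fn);
          schedule_delayed_work(&login_work, 0);

          /* ... finish setup ... */
          clear_bit(FLAG_SETUP_IN_PROGRESS, &flags);
          return 0;
  }

  static void __exit demo_exit(void)
  {
          cancel_delayed_work_sync(&login_work);
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");
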
@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
        if (!state) {
                pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
-               return;
+               goto err;
        }
 
-       if (conn->sock) {
-               struct sock *sk = conn->sock->sk;
-
-               read_lock_bh(&sk->sk_callback_lock);
-               state = iscsi_target_sk_state_check(sk);
-               read_unlock_bh(&sk->sk_callback_lock);
-
-               if (!state) {
-                       pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
-                       iscsi_target_restore_sock_callbacks(conn);
-                       iscsi_target_login_drop(conn, login);
-                       iscsit_deaccess_np(np, tpg, tpg_np);
-                       return;
-               }
+       if (iscsi_target_sk_check_close(conn)) {
+               pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+               goto err;
        }
 
        conn->login_kworker = current;
@@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
        flush_signals(current);
        conn->login_kworker = NULL;
 
-       if (rc < 0) {
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
-               return;
-       }
+       if (rc < 0)
+               goto err;
 
        pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
                        conn, current->comm, current->pid);
 
        rc = iscsi_target_do_login(conn, login);
        if (rc < 0) {
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
+               goto err;
        } else if (!rc) {
-               if (conn->sock) {
-                       struct sock *sk = conn->sock->sk;
-
-                       write_lock_bh(&sk->sk_callback_lock);
-                       clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
-                       write_unlock_bh(&sk->sk_callback_lock);
-               }
+               if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+                       goto err;
        } else if (rc == 1) {
                iscsi_target_nego_release(conn);
                iscsi_post_login_handler(np, conn, zero_tsih);
                iscsit_deaccess_np(np, tpg, tpg_np);
        }
+       return;
+
+err:
+       iscsi_target_restore_sock_callbacks(conn);
+       iscsi_target_login_drop(conn, login);
+       iscsit_deaccess_np(np, tpg, tpg_np);
 }
 
 static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
                orig_state_change(sk);
                return;
        }
+       state = __iscsi_target_sk_check_close(sk);
+       pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
+
        if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
                pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
                         " conn: %p\n", conn);
+               if (state)
+                       set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
                write_unlock_bh(&sk->sk_callback_lock);
                orig_state_change(sk);
                return;
        }
-       if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+       if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
                pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
                         conn);
                write_unlock_bh(&sk->sk_callback_lock);
                orig_state_change(sk);
                return;
        }
+       /*
+        * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+        * but only queue conn->login_work -> iscsi_target_do_login_rx()
+        * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+        *
+        * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+        * will detect the dropped TCP connection from delayed workqueue context.
+        *
+        * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+        * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+        * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+        * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+        * dropped TCP connection in iscsi_np process context, and cleaning up
+        * the remaining iscsi connection resources.
+        */
+       if (state) {
+               pr_debug("iscsi_target_sk_state_change got failed state\n");
+               set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+               state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+               write_unlock_bh(&sk->sk_callback_lock);
 
-       state = iscsi_target_sk_state_check(sk);
-       write_unlock_bh(&sk->sk_callback_lock);
-
-       pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+               orig_state_change(sk);
 
-       if (!state) {
-               pr_debug("iscsi_target_sk_state_change got failed state\n");
-               schedule_delayed_work(&conn->login_cleanup_work, 0);
+               if (!state)
+                       schedule_delayed_work(&conn->login_work, 0);
                return;
        }
+       write_unlock_bh(&sk->sk_callback_lock);
+
        orig_state_change(sk);
 }
 
@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
                        if (iscsi_target_handle_csg_one(conn, login) < 0)
                                return -1;
                        if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+                               /*
+                                * Check to make sure the TCP connection has not
+                                * dropped asynchronously while session reinstatement
+                                * was occurring in this kthread context, before
+                                * transitioning to full feature phase operation.
+                                */
+                               if (iscsi_target_sk_check_close(conn))
+                                       return -1;
+
                                login->tsih = conn->sess->tsih;
                                login->login_complete = 1;
                                iscsi_target_restore_sock_callbacks(conn);
@@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
                break;
        }
 
-       if (conn->sock) {
-               struct sock *sk = conn->sock->sk;
-               bool state;
-
-               read_lock_bh(&sk->sk_callback_lock);
-               state = iscsi_target_sk_state_check(sk);
-               read_unlock_bh(&sk->sk_callback_lock);
-
-               if (!state) {
-                       pr_debug("iscsi_target_do_login() failed state for"
-                                " conn: %p\n", conn);
-                       return -1;
-               }
-       }
-
        return 0;
 }
 
@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation(
 
                write_lock_bh(&sk->sk_callback_lock);
                set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+               set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
                write_unlock_bh(&sk->sk_callback_lock);
        }
-
+       /*
+        * If iscsi_target_do_login returns zero to signal more PDU
+        * exchanges are required to complete the login, go ahead and
+        * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+        * is still active.
+        *
+        * Otherwise if TCP connection dropped asynchronously, go ahead
+        * and perform connection cleanup now.
+        */
        ret = iscsi_target_do_login(conn, login);
+       if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+               ret = -1;
+
        if (ret < 0) {
                cancel_delayed_work_sync(&conn->login_work);
                cancel_delayed_work_sync(&conn->login_cleanup_work);
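
A note on the resulting flag protocol: LOGIN_FLAGS_INITIAL_PDU is set before the first PDU is processed, the asynchronous ->sk_state_change() path defers cleanup while it is set, and iscsi_target_sk_check_and_clear() decides under sk_callback_lock which path owns the error handling. A minimal userspace sketch of that hand-off (illustrative names, with a mutex standing in for sk_callback_lock; not the kernel API):

    #include <pthread.h>
    #include <stdbool.h>

    #define FLAG_INITIAL_PDU (1u << 0)
    #define FLAG_CLOSED      (1u << 1)

    struct conn {
            pthread_mutex_t lock;     /* stands in for sk->sk_callback_lock */
            unsigned int flags;
            bool tcp_dead;            /* stands in for the TCP state check */
    };

    /* Async path: on connection drop, record CLOSED, but only queue the
     * login work when the initial-PDU phase has already finished. */
    static bool conn_state_change(struct conn *c)
    {
            bool queue_work;

            pthread_mutex_lock(&c->lock);
            c->flags |= FLAG_CLOSED;
            queue_work = !(c->flags & FLAG_INITIAL_PDU);
            pthread_mutex_unlock(&c->lock);
            return queue_work;
    }

    /* Sync path: clear the flag only while the connection is healthy;
     * a true return tells the caller to run the error path itself. */
    static bool conn_check_and_clear(struct conn *c, unsigned int flag)
    {
            bool failed;

            pthread_mutex_lock(&c->lock);
            failed = c->tcp_dead || (c->flags & FLAG_CLOSED);
            if (!failed)
                    c->flags &= ~flag;
            pthread_mutex_unlock(&c->lock);
            return failed;
    }
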
index 37f57357d4a0827f5669cb89d1619c4651192547..6025935036c976edeeee0d7a91df79a66aa84a2b 100644 (file)
@@ -1160,15 +1160,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
        if (cmd->unknown_data_length) {
                cmd->data_length = size;
        } else if (size != cmd->data_length) {
-               pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+               pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
                        " %u does not match SCSI CDB Length: %u for SAM Opcode:"
                        " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
                                cmd->data_length, size, cmd->t_task_cdb[0]);
 
-               if (cmd->data_direction == DMA_TO_DEVICE &&
-                   cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
-                       pr_err("Rejecting underflow/overflow WRITE data\n");
-                       return TCM_INVALID_CDB_FIELD;
+               if (cmd->data_direction == DMA_TO_DEVICE) {
+                       if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+                               pr_err_ratelimited("Rejecting underflow/overflow"
+                                                  " for WRITE data CDB\n");
+                               return TCM_INVALID_CDB_FIELD;
+                       }
+                       /*
+                        * Some fabric drivers like iscsi-target still expect to
+                        * always reject overflow writes.  Reject this case until
+                        * full fabric driver level support for overflow writes
+                        * is introduced tree-wide.
+                        */
+                       if (size > cmd->data_length) {
+                               pr_err_ratelimited("Rejecting overflow for"
+                                                  " WRITE control CDB\n");
+                               return TCM_INVALID_CDB_FIELD;
+                       }
                }
                /*
                 * Reject READ_* or WRITE_* with overflow/underflow for
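
Reduced to a decision table, the check above accepts matching lengths, rejects any mismatch on a WRITE data CDB, and rejects only the overflow direction for WRITE control CDBs; other mismatches fall through to the residual handling. A sketch of that policy as a pure function (hypothetical helper, not the target-core API):

    #include <stdbool.h>

    enum verdict { ACCEPT, REJECT_WRITE_DATA, REJECT_WRITE_OVERFLOW };

    static enum verdict size_check(bool is_write, bool is_data_cdb,
                                   unsigned int cdb_len, unsigned int xfer_len)
    {
            if (xfer_len == cdb_len)
                    return ACCEPT;
            if (is_write) {
                    if (is_data_cdb)
                            return REJECT_WRITE_DATA;     /* under- or overflow */
                    if (xfer_len > cdb_len)
                            return REJECT_WRITE_OVERFLOW; /* control CDB overflow */
            }
            return ACCEPT;  /* residual handling continues in the caller */
    }
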
index 9045837f748bd3b602256cfa9e83058ae92b8b33..beb5f098f32d6f7bb5851deb810065ab37e4ac4a 100644 (file)
@@ -97,7 +97,7 @@ struct tcmu_hba {
 
 struct tcmu_dev {
        struct list_head node;
-
+       struct kref kref;
        struct se_device se_dev;
 
        char *name;
@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
        if (!udev)
                return NULL;
+       kref_init(&udev->kref);
 
        udev->name = kstrdup(name, GFP_KERNEL);
        if (!udev->name) {
@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
        return 0;
 }
 
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct tcmu_dev *udev = TCMU_DEV(dev);
+
+       kfree(udev->uio_info.name);
+       kfree(udev->name);
+       kfree(udev);
+}
+
+static void tcmu_dev_kref_release(struct kref *kref)
+{
+       struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
+       struct se_device *dev = &udev->se_dev;
+
+       call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
 static int tcmu_release(struct uio_info *info, struct inode *inode)
 {
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
        clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
 
        pr_debug("close\n");
-
+       /* release ref from configure */
+       kref_put(&udev->kref, tcmu_dev_kref_release);
        return 0;
 }
 
@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev)
                dev->dev_attrib.hw_max_sectors = 128;
        dev->dev_attrib.hw_queue_depth = 128;
 
+       /*
+        * Get a ref in case userspace does a close on the uio device before
+        * LIO has initiated tcmu_free_device.
+        */
+       kref_get(&udev->kref);
+
        ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
                                 udev->uio_info.uio_dev->minor);
        if (ret)
@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev)
        return 0;
 
 err_netlink:
+       kref_put(&udev->kref, tcmu_dev_kref_release);
        uio_unregister_device(&udev->uio_info);
 err_register:
        vfree(udev->mb_addr);
 err_vzalloc:
        kfree(info->name);
+       info->name = NULL;
 
        return ret;
 }
@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
        return -EINVAL;
 }
 
-static void tcmu_dev_call_rcu(struct rcu_head *p)
-{
-       struct se_device *dev = container_of(p, struct se_device, rcu_head);
-       struct tcmu_dev *udev = TCMU_DEV(dev);
-
-       kfree(udev);
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
        return udev->uio_info.uio_dev ? true : false;
@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev)
                                   udev->uio_info.uio_dev->minor);
 
                uio_unregister_device(&udev->uio_info);
-               kfree(udev->uio_info.name);
-               kfree(udev->name);
        }
-       call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+
+       /* release ref from init */
+       kref_put(&udev->kref, tcmu_dev_kref_release);
 }
 
 enum {
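
The kref added above gives struct tcmu_dev two references: one taken at allocation and dropped in tcmu_free_device(), and one taken at configure time and dropped when userspace closes the uio device, so whichever side finishes last frees the memory (deferred through call_rcu() in the kernel). A userspace analogue of that lifetime, with illustrative names:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct dev {
            atomic_int refs;
    };

    static struct dev *dev_alloc(void)
    {
            struct dev *d = calloc(1, sizeof(*d));

            if (d)
                    atomic_init(&d->refs, 1);  /* ref from init */
            return d;
    }

    static void dev_get(struct dev *d)         /* taken at configure time */
    {
            atomic_fetch_add(&d->refs, 1);
    }

    static void dev_put(struct dev *d)         /* release() and free_device() */
    {
            if (atomic_fetch_sub(&d->refs, 1) == 1)
                    free(d);                   /* kernel defers via call_rcu() */
    }
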
index ab08af4654ef18c05d1ab9725dd96fb44f9264bf..42c098e86f849393eeb580e85fef1f93a14bdac7 100644 (file)
@@ -9,8 +9,9 @@ config BCM2835_THERMAL
 config BCM_NS_THERMAL
        tristate "Northstar thermal driver"
        depends on ARCH_BCM_IPROC || COMPILE_TEST
+       default y if ARCH_BCM_IPROC
        help
-         Northstar is a family of SoCs that includes e.g. BCM4708, BCM47081,
-         BCM4709 and BCM47094. It contains DMU (Device Management Unit) block
-         with a thermal sensor that allows checking CPU temperature. This
-         driver provides support for it.
+         Support for the Northstar and Northstar Plus family of SoCs (e.g.
+         BCM4708, BCM4709, BCM5301x, BCM95852X). These SoCs contain a DMU
+         (Device Management Unit) block with a thermal sensor that allows
+         checking the CPU temperature.
index 644ba526d9ea09d2b1d0525cbf298cf10893cad0..4362a69ac88dcff8573e88e16fd6f3653432dd68 100644 (file)
@@ -195,7 +195,6 @@ static struct thermal_zone_of_device_ops tmu_tz_ops = {
 static int qoriq_tmu_probe(struct platform_device *pdev)
 {
        int ret;
-       const struct thermal_trip *trip;
        struct qoriq_tmu_data *data;
        struct device_node *np = pdev->dev.of_node;
        u32 site = 0;
@@ -243,8 +242,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
                goto err_tmu;
        }
 
-       trip = of_thermal_get_trip_points(data->tz);
-
        /* Enable monitoring */
        site |= 0x1 << (15 - data->sensor_id);
        tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
index b21b9cc2c8d6412897193a4ce53e295966737e9c..5a51c740e37238b31b9883d689aa67ffaea15704 100644 (file)
@@ -359,7 +359,7 @@ static DECLARE_DELAYED_WORK(thermal_emergency_poweroff_work,
  * This may be called from any critical situation to trigger a system shutdown
  * after a known period of time. By default this is not scheduled.
  */
-void thermal_emergency_poweroff(void)
+static void thermal_emergency_poweroff(void)
 {
        int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
        /*
index ba9c302454fb398490046be46831c0a0f821788c..696ab3046b87d98cff65182f60499c98578175bd 100644 (file)
@@ -1010,7 +1010,7 @@ ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
 }
 
 /**
- * ti_bandgap_set_continous_mode() - One time enabling of continuous mode
+ * ti_bandgap_set_continuous_mode() - One time enabling of continuous mode
  * @bgp: pointer to struct ti_bandgap
  *
  * Call this function only if HAS(MODE_CONFIG) is set. As this driver may
@@ -1214,22 +1214,18 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
        }
 
        bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL);
-       if (!bgp) {
-               dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
+       if (!bgp)
                return ERR_PTR(-ENOMEM);
-       }
 
        of_id = of_match_device(of_ti_bandgap_match, &pdev->dev);
        if (of_id)
                bgp->conf = of_id->data;
 
        /* register shadow for context save and restore */
-       bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
-                                  bgp->conf->sensor_count, GFP_KERNEL);
-       if (!bgp->regval) {
-               dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
+       bgp->regval = devm_kcalloc(&pdev->dev, bgp->conf->sensor_count,
+                                  sizeof(*bgp->regval), GFP_KERNEL);
+       if (!bgp->regval)
                return ERR_PTR(-ENOMEM);
-       }
 
        i = 0;
        do {
index 7ac9bcdf1e61a551bb8286a48c0afc492e4a95ee..61fe8d6fd24e1bb0cf2cdfd2a4de67ceb05123bf 100644 (file)
@@ -764,7 +764,7 @@ static int __init ehv_bc_init(void)
        ehv_bc_driver = alloc_tty_driver(count);
        if (!ehv_bc_driver) {
                ret = -ENOMEM;
-               goto error;
+               goto err_free_bcs;
        }
 
        ehv_bc_driver->driver_name = "ehv-bc";
@@ -778,24 +778,23 @@ static int __init ehv_bc_init(void)
        ret = tty_register_driver(ehv_bc_driver);
        if (ret) {
                pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret);
-               goto error;
+               goto err_put_tty_driver;
        }
 
        ret = platform_driver_register(&ehv_bc_tty_driver);
        if (ret) {
                pr_err("ehv-bc: could not register platform driver (ret=%i)\n",
                       ret);
-               goto error;
+               goto err_deregister_tty_driver;
        }
 
        return 0;
 
-error:
-       if (ehv_bc_driver) {
-               tty_unregister_driver(ehv_bc_driver);
-               put_tty_driver(ehv_bc_driver);
-       }
-
+err_deregister_tty_driver:
+       tty_unregister_driver(ehv_bc_driver);
+err_put_tty_driver:
+       put_tty_driver(ehv_bc_driver);
+err_free_bcs:
        kfree(bcs);
 
        return ret;
index 433de5ea9b02f53386b9367e64ad880ac6795e07..f71b47334149ace03ff276e41d8ecfd2a7038c6e 100644 (file)
@@ -122,6 +122,18 @@ void serdev_device_write_wakeup(struct serdev_device *serdev)
 }
 EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
 
+int serdev_device_write_buf(struct serdev_device *serdev,
+                           const unsigned char *buf, size_t count)
+{
+       struct serdev_controller *ctrl = serdev->ctrl;
+
+       if (!ctrl || !ctrl->ops->write_buf)
+               return -EINVAL;
+
+       return ctrl->ops->write_buf(ctrl, buf, count);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_buf);
+
 int serdev_device_write(struct serdev_device *serdev,
                        const unsigned char *buf, size_t count,
                        unsigned long timeout)
index 487c88f6aa0e36b5f986f5c02477e65f0f55143c..d0a021c93986252ceae888d7c26bde3eba295e9e 100644 (file)
@@ -102,9 +102,6 @@ static int ttyport_open(struct serdev_controller *ctrl)
                return PTR_ERR(tty);
        serport->tty = tty;
 
-       serport->port->client_ops = &client_ops;
-       serport->port->client_data = ctrl;
-
        if (tty->ops->open)
                tty->ops->open(serport->tty, NULL);
        else
@@ -215,6 +212,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
                                        struct device *parent,
                                        struct tty_driver *drv, int idx)
 {
+       const struct tty_port_client_operations *old_ops;
        struct serdev_controller *ctrl;
        struct serport *serport;
        int ret;
@@ -233,28 +231,37 @@ struct device *serdev_tty_port_register(struct tty_port *port,
 
        ctrl->ops = &ctrl_ops;
 
+       old_ops = port->client_ops;
+       port->client_ops = &client_ops;
+       port->client_data = ctrl;
+
        ret = serdev_controller_add(ctrl);
        if (ret)
-               goto err_controller_put;
+               goto err_reset_data;
 
        dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx);
        return &ctrl->dev;
 
-err_controller_put:
+err_reset_data:
+       port->client_data = NULL;
+       port->client_ops = old_ops;
        serdev_controller_put(ctrl);
+
        return ERR_PTR(ret);
 }
 
-void serdev_tty_port_unregister(struct tty_port *port)
+int serdev_tty_port_unregister(struct tty_port *port)
 {
        struct serdev_controller *ctrl = port->client_data;
        struct serport *serport = serdev_controller_get_drvdata(ctrl);
 
        if (!serport)
-               return;
+               return -ENODEV;
 
        serdev_controller_remove(ctrl);
        port->client_ops = NULL;
        port->client_data = NULL;
        serdev_controller_put(ctrl);
+
+       return 0;
 }
index 09a65a3ec7f7df0bb3f7355fd7758d797169f960..68fd045a7025047726860547ecd661b95d61ac80 100644 (file)
@@ -47,6 +47,7 @@
 /*
  * These are definitions for the Exar XR17V35X and XR17(C|D)15X
  */
+#define UART_EXAR_INT0         0x80
 #define UART_EXAR_SLEEP                0x8b    /* Sleep mode */
 #define UART_EXAR_DVID         0x8d    /* Device identification */
 
@@ -1337,7 +1338,7 @@ out_lock:
        /*
         * Check if the device is a Fintek F81216A
         */
-       if (port->type == PORT_16550A)
+       if (port->type == PORT_16550A && port->iotype == UPIO_PORT)
                fintek_8250_probe(up);
 
        if (up->capabilities != old_capabilities) {
@@ -1869,17 +1870,13 @@ static int serial8250_default_handle_irq(struct uart_port *port)
 static int exar_handle_irq(struct uart_port *port)
 {
        unsigned int iir = serial_port_in(port, UART_IIR);
-       int ret;
+       int ret = 0;
 
-       ret = serial8250_handle_irq(port, iir);
+       if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) &&
+           serial_port_in(port, UART_EXAR_INT0) != 0)
+               ret = 1;
 
-       if ((port->type == PORT_XR17V35X) ||
-          (port->type == PORT_XR17D15X)) {
-               serial_port_in(port, 0x80);
-               serial_port_in(port, 0x81);
-               serial_port_in(port, 0x82);
-               serial_port_in(port, 0x83);
-       }
+       ret |= serial8250_handle_irq(port, iir);
 
        return ret;
 }
@@ -2177,6 +2174,8 @@ int serial8250_do_startup(struct uart_port *port)
        serial_port_in(port, UART_RX);
        serial_port_in(port, UART_IIR);
        serial_port_in(port, UART_MSR);
+       if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
+               serial_port_in(port, UART_EXAR_INT0);
 
        /*
         * At this point, there's no way the LSR could still be 0xff;
@@ -2335,6 +2334,8 @@ dont_test_tx_en:
        serial_port_in(port, UART_RX);
        serial_port_in(port, UART_IIR);
        serial_port_in(port, UART_MSR);
+       if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
+               serial_port_in(port, UART_EXAR_INT0);
        up->lsr_saved_flags = 0;
        up->msr_saved_flags = 0;
 
index 18e3f8342b8554ae4e7d414dd39653fb3a33b26f..0475f5d261cef6ee60f7bcd2e9cde6572a520c46 100644 (file)
@@ -478,6 +478,7 @@ static int altera_jtaguart_remove(struct platform_device *pdev)
 
        port = &altera_jtaguart_ports[i].port;
        uart_remove_one_port(&altera_jtaguart_driver, port);
+       iounmap(port->membase);
 
        return 0;
 }
index 46d3438a0d27014ce97115e7a17b0eb024f12035..3e4b717670d7432e32d4b66568900773f6e239ce 100644 (file)
@@ -615,6 +615,7 @@ static int altera_uart_remove(struct platform_device *pdev)
        if (port) {
                uart_remove_one_port(&altera_uart_driver, port);
                port->mapbase = 0;
+               iounmap(port->membase);
        }
 
        return 0;
index ebd8569f9ad5da126b2ff15985943e630135925a..9fff25be87f9db91273aa96e5b015705e7fd9118 100644 (file)
@@ -27,6 +27,7 @@
 #define UARTn_FRAME            0x04
 #define UARTn_FRAME_DATABITS__MASK     0x000f
 #define UARTn_FRAME_DATABITS(n)                ((n) - 3)
+#define UARTn_FRAME_PARITY__MASK       0x0300
 #define UARTn_FRAME_PARITY_NONE                0x0000
 #define UARTn_FRAME_PARITY_EVEN                0x0200
 #define UARTn_FRAME_PARITY_ODD         0x0300
@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
                        16 * (4 + (clkdiv >> 6)));
 
        frame = efm32_uart_read32(efm_port, UARTn_FRAME);
-       if (frame & UARTn_FRAME_PARITY_ODD)
+       switch (frame & UARTn_FRAME_PARITY__MASK) {
+       case UARTn_FRAME_PARITY_ODD:
                *parity = 'o';
-       else if (frame & UARTn_FRAME_PARITY_EVEN)
+               break;
+       case UARTn_FRAME_PARITY_EVEN:
                *parity = 'e';
-       else
+               break;
+       default:
                *parity = 'n';
+       }
 
        *bits = (frame & UARTn_FRAME_DATABITS__MASK) -
                        UARTn_FRAME_DATABITS(4) + 4;
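
The switch replaces a subtly buggy if-chain: UARTn_FRAME_PARITY_ODD (0x0300) is a bit superset of UARTn_FRAME_PARITY_EVEN (0x0200), so "frame & UARTn_FRAME_PARITY_ODD" was also nonzero for even parity and the console options misreported 'o'. Masking with UARTn_FRAME_PARITY__MASK and comparing exact values fixes the classification, as this standalone demo shows:

    #include <stdio.h>

    #define PARITY_MASK 0x0300
    #define PARITY_EVEN 0x0200
    #define PARITY_ODD  0x0300

    static char old_way(unsigned int frame)
    {
            if (frame & PARITY_ODD)         /* also true for EVEN bits */
                    return 'o';
            else if (frame & PARITY_EVEN)
                    return 'e';
            return 'n';
    }

    static char new_way(unsigned int frame)
    {
            switch (frame & PARITY_MASK) {
            case PARITY_ODD:  return 'o';
            case PARITY_EVEN: return 'e';
            default:          return 'n';
            }
    }

    int main(void)
    {
            unsigned int frame = PARITY_EVEN;

            /* prints "old=o new=e": the if-chain misclassified even parity */
            printf("old=%c new=%c\n", old_way(frame), new_way(frame));
            return 0;
    }
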
index 157883653256da9ebeae89d7dabead3285656c82..f190a84a02465668d64f3b6a57d6462588d781ad 100644 (file)
@@ -1382,9 +1382,9 @@ static struct spi_driver ifx_spi_driver = {
 static void __exit ifx_spi_exit(void)
 {
        /* unregister */
+       spi_unregister_driver(&ifx_spi_driver);
        tty_unregister_driver(tty_drv);
        put_tty_driver(tty_drv);
-       spi_unregister_driver(&ifx_spi_driver);
        unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
 }
 
index 33509b4beaec237715de1e8febd63e861fbb9c15..bbefddd92bfeae77b637033af8028ac481642931 100644 (file)
@@ -2184,7 +2184,9 @@ static int serial_imx_probe(struct platform_device *pdev)
                 * and DCD (when they are outputs) or enables the respective
                 * irqs. So set this bit early, i.e. before requesting irqs.
                 */
-               writel(UFCR_DCEDTE, sport->port.membase + UFCR);
+               reg = readl(sport->port.membase + UFCR);
+               if (!(reg & UFCR_DCEDTE))
+                       writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR);
 
                /*
                 * Disable UCR3_RI and UCR3_DCD irqs. They are also not
@@ -2195,7 +2197,15 @@ static int serial_imx_probe(struct platform_device *pdev)
                       sport->port.membase + UCR3);
 
        } else {
-               writel(0, sport->port.membase + UFCR);
+               unsigned long ucr3 = UCR3_DSR;
+
+               reg = readl(sport->port.membase + UFCR);
+               if (reg & UFCR_DCEDTE)
+                       writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR);
+
+               if (!is_imx1_uart(sport))
+                       ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
+               writel(ucr3, sport->port.membase + UCR3);
        }
 
        clk_disable_unprepare(sport->clk_ipg);
index 0f45b7884a2c58a299668591e87bead1f6628f98..13bfd5dcffce5c0bfaee47879277e32aedf308a9 100644 (file)
@@ -2083,7 +2083,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
        mutex_lock(&port->mutex);
 
        tty_dev = device_find_child(uport->dev, &match, serial_match_port);
-       if (device_may_wakeup(tty_dev)) {
+       if (tty_dev && device_may_wakeup(tty_dev)) {
                if (!enable_irq_wake(uport->irq))
                        uport->irq_wake = 1;
                put_device(tty_dev);
@@ -2782,7 +2782,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
         * Register the port whether it's detected or not.  This allows
         * setserial to be used to alter this port's parameters.
         */
-       tty_dev = tty_port_register_device_attr(port, drv->tty_driver,
+       tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver,
                        uport->line, uport->dev, port, uport->tty_groups);
        if (likely(!IS_ERR(tty_dev))) {
                device_set_wakeup_capable(tty_dev, 1);
@@ -2845,7 +2845,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
        /*
         * Remove the devices from the tty layer
         */
-       tty_unregister_device(drv->tty_driver, uport->line);
+       tty_port_unregister_device(port, drv->tty_driver, uport->line);
 
        tty = tty_port_tty_get(port);
        if (tty) {
index 1d21a9c1d33e6e3c5ef007a25e4b4550fb62a51c..6b137194069fee47bc45b1866865ee27adfd33c5 100644 (file)
@@ -128,20 +128,86 @@ struct device *tty_port_register_device_attr(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
                struct device *device, void *drvdata,
                const struct attribute_group **attr_grp)
+{
+       tty_port_link_device(port, driver, index);
+       return tty_register_device_attr(driver, index, device, drvdata,
+                       attr_grp);
+}
+EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
+
+/**
+ * tty_port_register_device_attr_serdev - register tty or serdev device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ * @device: parent device if it exists, otherwise NULL
+ * @drvdata: driver data for the device
+ * @attr_grp: attribute group for the device
+ *
+ * Register a serdev or tty device depending on whether the parent device
+ * has any defined serdev clients.
+ */
+struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
+               struct tty_driver *driver, unsigned index,
+               struct device *device, void *drvdata,
+               const struct attribute_group **attr_grp)
 {
        struct device *dev;
 
        tty_port_link_device(port, driver, index);
 
        dev = serdev_tty_port_register(port, device, driver, index);
-       if (PTR_ERR(dev) != -ENODEV)
+       if (PTR_ERR(dev) != -ENODEV) {
                /* Skip creating cdev if we registered a serdev device */
                return dev;
+       }
 
        return tty_register_device_attr(driver, index, device, drvdata,
                        attr_grp);
 }
-EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
+EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
+
+/**
+ * tty_port_register_device_serdev - register tty or serdev device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ * @device: parent device if it exists, otherwise NULL
+ *
+ * Register a serdev or tty device depending on whether the parent device
+ * has any defined serdev clients.
+ */
+struct device *tty_port_register_device_serdev(struct tty_port *port,
+               struct tty_driver *driver, unsigned index,
+               struct device *device)
+{
+       return tty_port_register_device_attr_serdev(port, driver, index,
+                       device, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(tty_port_register_device_serdev);
+
+/**
+ * tty_port_unregister_device - deregister a tty or serdev device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ *
+ * If a tty or serdev device is registered with a call to
+ * tty_port_register_device_serdev() then this function must be called when
+ * the device is gone.
+ */
+void tty_port_unregister_device(struct tty_port *port,
+               struct tty_driver *driver, unsigned index)
+{
+       int ret;
+
+       ret = serdev_tty_port_unregister(port);
+       if (ret == 0)
+               return;
+
+       tty_unregister_device(driver, index);
+}
+EXPORT_SYMBOL_GPL(tty_port_unregister_device);
 
 int tty_port_alloc_xmit_buf(struct tty_port *port)
 {
@@ -189,9 +255,6 @@ static void tty_port_destructor(struct kref *kref)
        /* check if last port ref was dropped before tty release */
        if (WARN_ON(port->itty))
                return;
-
-       serdev_tty_port_unregister(port);
-
        if (port->xmit_buf)
                free_page((unsigned long)port->xmit_buf);
        tty_port_destroy(port);
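
The shape shared by tty_port_register_device_attr_serdev() and tty_port_unregister_device() is "try serdev first, fall back to a plain tty device only when serdev reports -ENODEV, meaning no client is described for this parent". A minimal sketch of that fallback, with userspace stand-ins rather than the tty-layer API:

    #include <errno.h>
    #include <stdio.h>

    static int register_serdev_stub(int idx)
    {
            (void)idx;
            return -ENODEV;               /* pretend no serdev client exists */
    }

    static int register_tty_stub(int idx)
    {
            printf("tty%d registered\n", idx);
            return 0;
    }

    static int register_port(int idx)
    {
            int ret = register_serdev_stub(idx);

            if (ret != -ENODEV)             /* success or a real error: stop */
                    return ret;
            return register_tty_stub(idx);  /* fall back to a regular tty */
    }
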
index 7a92a5e1d40c6f17227936ee2b6925286deab511..feca75b07fddce01e6e121542d3e0b74d321f85f 100644 (file)
@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
                                st->global_error = 1;
                }
        }
-       st->va += PAGE_SIZE * nr;
-       st->index += nr;
+       st->va += XEN_PAGE_SIZE * nr;
+       st->index += nr / XEN_PFN_PER_PAGE;
 
        return 0;
 }
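
This fix matters where the kernel page size exceeds Xen's fixed 4 KiB granule (e.g. arm64 with 64 KiB pages): nr counts Xen frames, so the virtual address must advance by nr * XEN_PAGE_SIZE while the page index advances by nr / XEN_PFN_PER_PAGE, where XEN_PFN_PER_PAGE is PAGE_SIZE / XEN_PAGE_SIZE. A quick arithmetic check under those assumed sizes:

    #include <assert.h>

    #define PAGE_SIZE        (64 * 1024)  /* e.g. arm64 with 64K pages */
    #define XEN_PAGE_SIZE    (4 * 1024)   /* Xen granule is always 4K  */
    #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)  /* == 16 */

    int main(void)
    {
            unsigned long va = 0, index = 0;
            int nr = 32;                  /* one batch of 32 Xen frames */

            va    += (unsigned long)XEN_PAGE_SIZE * nr;  /* 128K, not 2M */
            index += nr / XEN_PFN_PER_PAGE;              /* 2 kernel pages */
            assert(va == 128 * 1024 && index == 2);
            return 0;
    }
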
index 3fdde0b283c9b86f7fa6aaa9585593699be29fca..29308a80d66ffa118b9aec996874d65a952c29aa 100644 (file)
@@ -1671,8 +1671,12 @@ static long ceph_fallocate(struct file *file, int mode,
        }
 
        size = i_size_read(inode);
-       if (!(mode & FALLOC_FL_KEEP_SIZE))
+       if (!(mode & FALLOC_FL_KEEP_SIZE)) {
                endoff = offset + length;
+               ret = inode_newsize_ok(inode, endoff);
+               if (ret)
+                       goto unlock;
+       }
 
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
index c22eaf162f95c1456563b31a8362da9e531c9185..2a6889b3585f068c73091d8895639b7e941d702a 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1154,6 +1154,17 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
                goto out;
        }
 
+       /*
+        * It is possible, particularly with mixed reads & writes to private
+        * mappings, that we have raced with a PMD fault that overlaps with
+        * the PTE we need to set up.  If so just return and the fault will be
+        * retried.
+        */
+       if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+               vmf_ret = VM_FAULT_NOPAGE;
+               goto unlock_entry;
+       }
+
        /*
         * Note that we don't bother to use iomap_apply here: DAX required
         * the file system block size to be equal the page size, which means
@@ -1397,6 +1408,18 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
        if (IS_ERR(entry))
                goto fallback;
 
+       /*
+        * It is possible, particularly with mixed reads & writes to private
+        * mappings, that we have raced with a PTE fault that overlaps with
+        * the PMD we need to set up.  If so just return and the fault will be
+        * retried.
+        */
+       if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
+                       !pmd_devmap(*vmf->pmd)) {
+               result = 0;
+               goto unlock_entry;
+       }
+
        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
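
Both hunks apply the same recheck-under-lock shape: once the mapping entry is locked, the shared page-table state is inspected again, and the fault simply asks to be retried if a concurrent fault of the other granularity won the race. A generic pthread sketch of that shape (illustrative only, not the DAX entry locking itself):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t entry_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool huge_mapping_installed;  /* stands in for pmd_trans_huge() */

    static int pte_fault(void)
    {
            int ret;

            pthread_mutex_lock(&entry_lock);
            if (huge_mapping_installed) {
                    ret = 1;             /* like VM_FAULT_NOPAGE: retry */
                    goto unlock;
            }
            ret = 0;                     /* ... set up the PTE here ... */
    unlock:
            pthread_mutex_unlock(&entry_lock);
            return ret;
    }
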
index f865b96374df2b5c40ecfb13663154499ec09b31..d2955daf17a4fcefa2ded3a412f67732315de12a 100644 (file)
@@ -659,7 +659,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
-       int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
+       int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
        lh = page_address(page);
index f5714ee01000de49c5efcd3675226a3a3b7a7074..23542dc44a25c9f398b8a2a69905bf9eafbe5270 100644 (file)
@@ -454,6 +454,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
                        goto out_err_free;
 
                /* fh */
+               rc = -EIO;
                p = xdr_inline_decode(&stream, 4);
                if (!p)
                        goto out_err_free;
index e9b4c3320e371a90020ea25f4be44d2d76508db7..3e24392f2caa1296c4475ed84701fdfabfff467f 100644 (file)
@@ -398,7 +398,6 @@ extern struct file_system_type nfs4_referral_fs_type;
 bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
 struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
                        struct nfs_subversion *);
-void nfs_initialise_sb(struct super_block *);
 int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
 int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
 struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
@@ -458,7 +457,6 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
 
 /* super.c */
-void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
 void nfs_umount_begin(struct super_block *);
 int  nfs_statfs(struct dentry *, struct kstatfs *);
 int  nfs_show_options(struct seq_file *, struct dentry *);
index 1a224a33a6c23c362e1bbacb8150bb9bbf02b3fd..e5686be67be8d361a32344e3aaaae235d739ffd7 100644 (file)
@@ -246,7 +246,7 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
 
        devname = nfs_devname(dentry, page, PAGE_SIZE);
        if (IS_ERR(devname))
-               mnt = (struct vfsmount *)devname;
+               mnt = ERR_CAST(devname);
        else
                mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata);
 
index 929d09a5310ad7df79be527a45a9aa524825b7f0..319a47db218d133d36b749641b63e5fa4489014c 100644 (file)
@@ -177,7 +177,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
        if (status)
                goto out;
 
-       if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
+       if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
                                    &res->commit_res.verf->verifier)) {
                status = -EAGAIN;
                goto out;
index 692a7a8bfc7afd05ad40d6e04a4a53db07e01753..66776f02211131bd003e31748dec6774a02a077f 100644 (file)
@@ -582,7 +582,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
                         */
                        nfs4_schedule_path_down_recovery(pos);
                default:
-                       spin_lock(&nn->nfs_client_lock);
                        goto out;
                }
 
index adc6ec28d4b59d3181c76ca8f33a9fed25d68ce9..c383d0913b54c90fb96020a62faaf989a5d946b3 100644 (file)
@@ -2094,12 +2094,26 @@ pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
 
+/*
+ * Check for any intersection between the request and the pgio->pg_lseg,
+ * and if none, put this pgio->pg_lseg away.
+ */
+static void
+pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+       if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
+               pnfs_put_lseg(pgio->pg_lseg);
+               pgio->pg_lseg = NULL;
+       }
+}
+
 void
 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
 {
        u64 rd_size = req->wb_bytes;
 
        pnfs_generic_pg_check_layout(pgio);
+       pnfs_generic_pg_check_range(pgio, req);
        if (pgio->pg_lseg == NULL) {
                if (pgio->pg_dreq == NULL)
                        rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
@@ -2131,6 +2145,7 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
                           struct nfs_page *req, u64 wb_size)
 {
        pnfs_generic_pg_check_layout(pgio);
+       pnfs_generic_pg_check_range(pgio, req);
        if (pgio->pg_lseg == NULL) {
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   req->wb_context,
@@ -2191,16 +2206,10 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
                seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
                                     pgio->pg_lseg->pls_range.length);
                req_start = req_offset(req);
-               WARN_ON_ONCE(req_start >= seg_end);
+
                /* start of request is past the last byte of this segment */
-               if (req_start >= seg_end) {
-                       /* reference the new lseg */
-                       if (pgio->pg_ops->pg_cleanup)
-                               pgio->pg_ops->pg_cleanup(pgio);
-                       if (pgio->pg_ops->pg_init)
-                               pgio->pg_ops->pg_init(pgio, req);
+               if (req_start >= seg_end)
                        return 0;
-               }
 
                /* adjust 'size' iff there are fewer bytes left in the
                 * segment than what nfs_generic_pg_test returned */
index 2d05b756a8d6504e79796d71eb5471a578d9ef5a..99731e3e332f3ec32eec26cca47556193dcf68fe 100644 (file)
@@ -593,6 +593,16 @@ pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
        return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2);
 }
 
+static inline bool
+pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req)
+{
+       u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length);
+       u64 req_last = req_offset(req) + req->wb_bytes;
+
+       return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last,
+                               req_offset(req), req_last);
+}
+
 extern unsigned int layoutstats_timer;
 
 #ifdef NFS_DEBUG
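
pnfs_lseg_request_intersecting() reduces to an interval-overlap test between the segment's byte range and the request's byte range. Assuming the usual half-open [start, end) semantics for pnfs_is_range_intersecting(), the predicate is just:

    #include <stdbool.h>
    #include <stdint.h>

    /* Two half-open ranges intersect iff each starts before the other ends. */
    static bool ranges_intersect(uint64_t s1, uint64_t e1,
                                 uint64_t s2, uint64_t e2)
    {
            return s1 < e2 && s2 < e1;
    }

So a layout segment covering [0, 1M) and a request at [1M, 1M + 4K) do not intersect, and pnfs_generic_pg_check_range() drops the cached lseg to force a fresh layout lookup.
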
index 2f3822a4a7d565e9e2e607b61d2c580c1befdd05..eceb4eabb064953f830d1bf97a5d0daeecdd27f0 100644 (file)
@@ -2301,7 +2301,7 @@ EXPORT_SYMBOL_GPL(nfs_remount);
 /*
  * Initialise the common bits of the superblock
  */
-inline void nfs_initialise_sb(struct super_block *sb)
+static void nfs_initialise_sb(struct super_block *sb)
 {
        struct nfs_server *server = NFS_SB(sb);
 
@@ -2348,7 +2348,8 @@ EXPORT_SYMBOL_GPL(nfs_fill_super);
 /*
  * Finish setting up a cloned NFS2/3/4 superblock
  */
-void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+static void nfs_clone_super(struct super_block *sb,
+                           struct nfs_mount_info *mount_info)
 {
        const struct super_block *old_sb = mount_info->cloned->sb;
        struct nfs_server *server = NFS_SB(sb);
index 12feac6ee2fd461a46c7b06b7a0ed0359fb4dfd1..452334694a5d1f37cc480e5d1cf2873c4246019d 100644 (file)
@@ -334,11 +334,8 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
        if (!p)
                return 0;
        p = xdr_decode_hyper(p, &args->offset);
-       args->count = ntohl(*p++);
-
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
 
+       args->count = ntohl(*p++);
        len = min(args->count, max_blocksize);
 
        /* set up the kvec */
@@ -352,7 +349,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
                v++;
        }
        args->vlen = v;
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -544,11 +541,9 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
        p = decode_fh(p, &args->fh);
        if (!p)
                return 0;
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -574,14 +569,10 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
        args->verf   = p; p += 2;
        args->dircount = ~0;
        args->count  = ntohl(*p++);
-
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        args->count  = min_t(u32, args->count, PAGE_SIZE);
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -599,9 +590,6 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
        args->dircount = ntohl(*p++);
        args->count    = ntohl(*p++);
 
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        len = args->count = min(args->count, max_blocksize);
        while (len > 0) {
                struct page *p = *(rqstp->rq_next_page++);
@@ -609,7 +597,8 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
                        args->buffer = page_address(p);
                len -= PAGE_SIZE;
        }
-       return 1;
+
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
index c453a1998e003d3e900407b266f1a15de5d5d94b..dadb3bf305b22f352a3f91a2df06b30284b4891c 100644 (file)
@@ -1769,6 +1769,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
                        opdesc->op_get_currentstateid(cstate, &op->u);
                op->status = opdesc->op_func(rqstp, cstate, &op->u);
 
+               /* Only from SEQUENCE */
+               if (cstate->status == nfserr_replay_cache) {
+                       dprintk("%s NFS4.1 replay from cache\n", __func__);
+                       status = op->status;
+                       goto out;
+               }
                if (!op->status) {
                        if (opdesc->op_set_currentstateid)
                                opdesc->op_set_currentstateid(cstate, &op->u);
@@ -1779,14 +1785,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
                        if (need_wrongsec_check(rqstp))
                                op->status = check_nfsd_access(current_fh->fh_export, rqstp);
                }
-
 encode_op:
-               /* Only from SEQUENCE */
-               if (cstate->status == nfserr_replay_cache) {
-                       dprintk("%s NFS4.1 replay from cache\n", __func__);
-                       status = op->status;
-                       goto out;
-               }
                if (op->status == nfserr_replay_me) {
                        op->replay = &cstate->replay_owner->so_replay;
                        nfsd4_encode_replay(&resp->xdr, op);
index 6a4947a3f4fa82be4118e4ed538a171118f4baa8..de07ff625777820fefc98bfa56adea81962e8135 100644 (file)
@@ -257,9 +257,6 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
        len = args->count     = ntohl(*p++);
        p++; /* totalcount - unused */
 
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2);
 
        /* set up somewhere to store response.
@@ -275,7 +272,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
                v++;
        }
        args->vlen = v;
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -365,11 +362,9 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli
        p = decode_fh(p, &args->fh);
        if (!p)
                return 0;
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -407,11 +402,9 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
        args->cookie = ntohl(*p++);
        args->count  = ntohl(*p++);
        args->count  = min_t(u32, args->count, PAGE_SIZE);
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 /*
index 358258364616cd3c2fee997daca2a192719cb045..4690cd75d8d7948a056fe899bc4600ade10b8566 100644 (file)
@@ -159,7 +159,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
                                        PTR_ERR(dent_inode));
                kfree(name);
                /* Return the error code. */
-               return (struct dentry *)dent_inode;
+               return ERR_CAST(dent_inode);
        }
        /* It is guaranteed that @name is no longer allocated at this point. */
        if (MREF_ERR(mref) == -ENOENT) {
index 827fc9809bc271f09b2c3b7abf4019c31d0e1636..9f88188060db9c7fa59e6882ecf33b55cf921788 100644 (file)
@@ -119,7 +119,7 @@ check_err:
 
        if (IS_ERR(inode)) {
                mlog_errno(PTR_ERR(inode));
-               result = (void *)inode;
+               result = ERR_CAST(inode);
                goto bail;
        }
 
index 0daac5112f7a32384b5febd39bd8499f31da8c31..c0c9683934b7a7883ab59eb8bbcd412e07385d0b 100644 (file)
@@ -1,5 +1,6 @@
 config OVERLAY_FS
        tristate "Overlay filesystem support"
+       select EXPORTFS
        help
          An overlay filesystem combines two filesystems - an 'upper' filesystem
          and a 'lower' filesystem.  When a name exists in both filesystems, the
index 9008ab9fbd2ebe89d419c249455eb740c48a9eb1..7a44533f4bbf24134a95bdc030bde5779f28457a 100644 (file)
@@ -300,7 +300,11 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
                        return PTR_ERR(fh);
        }
 
-       err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0);
+       /*
+        * Do not fail when upper doesn't support xattrs.
+        */
+       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh,
+                                fh ? fh->len : 0, 0);
        kfree(fh);
 
        return err;
@@ -342,13 +346,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
        if (tmpfile)
                temp = ovl_do_tmpfile(upperdir, stat->mode);
        else
-               temp = ovl_lookup_temp(workdir, dentry);
-       err = PTR_ERR(temp);
-       if (IS_ERR(temp))
-               goto out1;
-
+               temp = ovl_lookup_temp(workdir);
        err = 0;
-       if (!tmpfile)
+       if (IS_ERR(temp)) {
+               err = PTR_ERR(temp);
+               temp = NULL;
+       }
+
+       if (!err && !tmpfile)
                err = ovl_create_real(wdir, temp, &cattr, NULL, true);
 
        if (new_creds) {
@@ -454,6 +459,11 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
        ovl_path_upper(parent, &parentpath);
        upperdir = parentpath.dentry;
 
+       /* Mark parent "impure" because it may now contain non-pure upper */
+       err = ovl_set_impure(parent, upperdir);
+       if (err)
+               return err;
+
        err = vfs_getattr(&parentpath, &pstat,
                          STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT);
        if (err)
index 723b98b9069876d1656b74735dabcaf01484e26d..a63a71656e9bdaef6ed5cadf8acdb6d8002fe1b6 100644 (file)
@@ -41,7 +41,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
        }
 }
 
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
+struct dentry *ovl_lookup_temp(struct dentry *workdir)
 {
        struct dentry *temp;
        char name[20];
@@ -68,7 +68,7 @@ static struct dentry *ovl_whiteout(struct dentry *workdir,
        struct dentry *whiteout;
        struct inode *wdir = workdir->d_inode;
 
-       whiteout = ovl_lookup_temp(workdir, dentry);
+       whiteout = ovl_lookup_temp(workdir);
        if (IS_ERR(whiteout))
                return whiteout;
 
@@ -127,17 +127,28 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
        return err;
 }
 
-static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper,
+                              int xerr)
 {
        int err;
 
-       err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
+       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE, "y", 1, xerr);
        if (!err)
                ovl_dentry_set_opaque(dentry);
 
        return err;
 }
 
+static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+{
+       /*
+        * Fail with -EIO when trying to create opaque dir and upper doesn't
+        * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to
+        * return a specific error for noxattr case.
+        */
+       return ovl_set_opaque_xerr(dentry, upperdentry, -EIO);
+}
+
 /* Common operations required to be done after creation of file on upper */
 static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
                            struct dentry *newdentry, bool hardlink)
@@ -162,6 +173,11 @@ static bool ovl_type_merge(struct dentry *dentry)
        return OVL_TYPE_MERGE(ovl_path_type(dentry));
 }
 
+static bool ovl_type_origin(struct dentry *dentry)
+{
+       return OVL_TYPE_ORIGIN(ovl_path_type(dentry));
+}
+
 static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
                            struct cattr *attr, struct dentry *hardlink)
 {
@@ -250,7 +266,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
        if (upper->d_parent->d_inode != udir)
                goto out_unlock;
 
-       opaquedir = ovl_lookup_temp(workdir, dentry);
+       opaquedir = ovl_lookup_temp(workdir);
        err = PTR_ERR(opaquedir);
        if (IS_ERR(opaquedir))
                goto out_unlock;
@@ -382,7 +398,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        if (err)
                goto out;
 
-       newdentry = ovl_lookup_temp(workdir, dentry);
+       newdentry = ovl_lookup_temp(workdir);
        err = PTR_ERR(newdentry);
        if (IS_ERR(newdentry))
                goto out_unlock;
@@ -846,18 +862,16 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
        if (IS_ERR(redirect))
                return PTR_ERR(redirect);
 
-       err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT,
-                             redirect, strlen(redirect), 0);
+       err = ovl_check_setxattr(dentry, ovl_dentry_upper(dentry),
+                                OVL_XATTR_REDIRECT,
+                                redirect, strlen(redirect), -EXDEV);
        if (!err) {
                spin_lock(&dentry->d_lock);
                ovl_dentry_set_redirect(dentry, redirect);
                spin_unlock(&dentry->d_lock);
        } else {
                kfree(redirect);
-               if (err == -EOPNOTSUPP)
-                       ovl_clear_redirect_dir(dentry->d_sb);
-               else
-                       pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
+               pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
                /* Fall back to userspace copy-up */
                err = -EXDEV;
        }
@@ -943,6 +957,25 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
        old_upperdir = ovl_dentry_upper(old->d_parent);
        new_upperdir = ovl_dentry_upper(new->d_parent);
 
+       if (!samedir) {
+               /*
+                * When moving a merge dir or non-dir with copy up origin into
+                * a new parent, we are marking the new parent dir "impure".
+                * When ovl_iterate() iterates an "impure" upper dir, it will
+                * lookup the origin inodes of the entries to fill d_ino.
+                */
+               if (ovl_type_origin(old)) {
+                       err = ovl_set_impure(new->d_parent, new_upperdir);
+                       if (err)
+                               goto out_revert_creds;
+               }
+               if (!overwrite && ovl_type_origin(new)) {
+                       err = ovl_set_impure(old->d_parent, old_upperdir);
+                       if (err)
+                               goto out_revert_creds;
+               }
+       }
+
        trap = lock_rename(new_upperdir, old_upperdir);
 
        olddentry = lookup_one_len(old->d_name.name, old_upperdir,
@@ -992,7 +1025,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                if (ovl_type_merge_or_lower(old))
                        err = ovl_set_redirect(old, samedir);
                else if (!old_opaque && ovl_type_merge(new->d_parent))
-                       err = ovl_set_opaque(old, olddentry);
+                       err = ovl_set_opaque_xerr(old, olddentry, -EXDEV);
                if (err)
                        goto out_dput;
        }
@@ -1000,7 +1033,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                if (ovl_type_merge_or_lower(new))
                        err = ovl_set_redirect(new, samedir);
                else if (!new_opaque && ovl_type_merge(old->d_parent))
-                       err = ovl_set_opaque(new, newdentry);
+                       err = ovl_set_opaque_xerr(new, newdentry, -EXDEV);
                if (err)
                        goto out_dput;
        }
index ad9547f82da57fa4bd51eb5738cb8f02235e4455..d613e2c41242a52a6c018f43f9987bdbf461e0bb 100644 (file)
@@ -240,6 +240,16 @@ int ovl_xattr_get(struct dentry *dentry, const char *name,
        return res;
 }
 
+static bool ovl_can_list(const char *s)
+{
+       /* List all non-trusted xattrs */
+       if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+               return true;
+
+       /* Never list trusted.overlay, list other trusted for superuser only */
+       return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+}
+
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
@@ -263,7 +273,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
                        return -EIO;
 
                len -= slen;
-               if (ovl_is_private_xattr(s)) {
+               if (!ovl_can_list(s)) {
                        res -= slen;
                        memmove(s, s + slen, len);
                } else {
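
The compaction loop above can be exercised standalone: walk the NUL-separated name list and memmove() the tail over each rejected entry. A simplified userspace version with a narrower predicate than the kernel's ovl_can_list() (it only hides the trusted.overlay prefix):

    #include <errno.h>
    #include <stdbool.h>
    #include <string.h>
    #include <sys/types.h>

    static bool can_list(const char *s)
    {
            return strncmp(s, "trusted.overlay.", 16) != 0;
    }

    static ssize_t filter_list(char *list, ssize_t res)
    {
            ssize_t len = res;
            char *s = list;

            while (len > 0) {
                    size_t slen = strnlen(s, len) + 1;

                    if (slen > (size_t)len)   /* unterminated entry */
                            return -EIO;
                    len -= slen;
                    if (!can_list(s)) {
                            res -= slen;
                            memmove(s, s + slen, len);
                    } else {
                            s += slen;
                    }
            }
            return res;
    }
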
index bad0f665a63521efde00b4c488d4ed2ba85a5b75..f3136c31e72af24cbb9949449a12d292fc3bf11b 100644 (file)
@@ -169,17 +169,7 @@ invalid:
 
 static bool ovl_is_opaquedir(struct dentry *dentry)
 {
-       int res;
-       char val;
-
-       if (!d_is_dir(dentry))
-               return false;
-
-       res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
-       if (res == 1 && val == 'y')
-               return true;
-
-       return false;
+       return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE);
 }
 
 static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
@@ -351,6 +341,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
        unsigned int ctr = 0;
        struct inode *inode = NULL;
        bool upperopaque = false;
+       bool upperimpure = false;
        char *upperredirect = NULL;
        struct dentry *this;
        unsigned int i;
@@ -395,6 +386,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                                poe = roe;
                }
                upperopaque = d.opaque;
+               if (upperdentry && d.is_dir)
+                       upperimpure = ovl_is_impuredir(upperdentry);
        }
 
        if (!d.stop && poe->numlower) {
@@ -463,6 +456,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
        revert_creds(old_cred);
        oe->opaque = upperopaque;
+       oe->impure = upperimpure;
        oe->redirect = upperredirect;
        oe->__upperdentry = upperdentry;
        memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
index caa36cb9c46de9838805dc40e21672217407d40e..0623cebeefff8661d49d65a228ceec6290cee877 100644 (file)
@@ -24,6 +24,7 @@ enum ovl_path_type {
 #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
 #define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect"
 #define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin"
+#define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure"
 
 /*
  * The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
@@ -203,10 +204,10 @@ struct dentry *ovl_dentry_real(struct dentry *dentry);
 struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
 void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
 bool ovl_dentry_is_opaque(struct dentry *dentry);
+bool ovl_dentry_is_impure(struct dentry *dentry);
 bool ovl_dentry_is_whiteout(struct dentry *dentry);
 void ovl_dentry_set_opaque(struct dentry *dentry);
 bool ovl_redirect_dir(struct super_block *sb);
-void ovl_clear_redirect_dir(struct super_block *sb);
 const char *ovl_dentry_get_redirect(struct dentry *dentry);
 void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
 void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
@@ -219,6 +220,17 @@ bool ovl_is_whiteout(struct dentry *dentry);
 struct file *ovl_path_open(struct path *path, int flags);
 int ovl_copy_up_start(struct dentry *dentry);
 void ovl_copy_up_end(struct dentry *dentry);
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name);
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+                      const char *name, const void *value, size_t size,
+                      int xerr);
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
+
+static inline bool ovl_is_impuredir(struct dentry *dentry)
+{
+       return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
+}
+
 
 /* namei.c */
 int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
@@ -263,7 +275,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
 
 /* dir.c */
 extern const struct inode_operations ovl_dir_inode_operations;
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry);
+struct dentry *ovl_lookup_temp(struct dentry *workdir);
 struct cattr {
        dev_t rdev;
        umode_t mode;
index b2023ddb85323725b8bbfa687f31fc854c1ba5e9..34bc4a9f5c61d95f049b3f34ccd0de243aa27ddd 100644 (file)
@@ -28,6 +28,7 @@ struct ovl_fs {
        /* creds of process who forced instantiation of super block */
        const struct cred *creator_cred;
        bool tmpfile;
+       bool noxattr;
        wait_queue_head_t copyup_wq;
        /* sb common to all layers */
        struct super_block *same_sb;
@@ -42,6 +43,7 @@ struct ovl_entry {
                        u64 version;
                        const char *redirect;
                        bool opaque;
+                       bool impure;
                        bool copying;
                };
                struct rcu_head rcu;
index 9828b7de89992e64a1900a277b58423dde7b992a..4882ffb37baead1c4da41684158d22cbe58e5353 100644 (file)
@@ -891,6 +891,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                                dput(temp);
                        else
                                pr_warn("overlayfs: upper fs does not support tmpfile.\n");
+
+                       /*
+                        * Check if upper/work fs supports trusted.overlay.*
+                        * xattr
+                        */
+                       err = ovl_do_setxattr(ufs->workdir, OVL_XATTR_OPAQUE,
+                                             "0", 1, 0);
+                       if (err) {
+                               ufs->noxattr = true;
+                               pr_warn("overlayfs: upper fs does not support xattr.\n");
+                       } else {
+                               vfs_removexattr(ufs->workdir, OVL_XATTR_OPAQUE);
+                       }
                }
        }
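
The probe above mirrors a pattern that also works from user space: attempt a setxattr, undo it on success, and treat ENOTSUP as "no xattr support". A hedged sketch using a user.* name so no privilege is needed (illustration only; the kernel path above probes trusted.overlay.opaque on the workdir):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/xattr.h>

    /* 1 = supported, 0 = filesystem lacks xattrs, -1 = other error */
    static int fs_supports_xattr(const char *path)
    {
            if (setxattr(path, "user.xattr_probe", "0", 1, 0) == 0) {
                    removexattr(path, "user.xattr_probe");
                    return 1;
            }
            return errno == ENOTSUP ? 0 : -1;
    }

    int main(int argc, char **argv)
    {
            if (argc != 2)
                    return 1;
            printf("xattr support: %d\n", fs_supports_xattr(argv[1]));
            return 0;
    }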
 
@@ -961,7 +974,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        path_put(&workpath);
        kfree(lowertmp);
 
-       oe->__upperdentry = upperpath.dentry;
+       if (upperpath.dentry) {
+               oe->__upperdentry = upperpath.dentry;
+               oe->impure = ovl_is_impuredir(upperpath.dentry);
+       }
        for (i = 0; i < numlower; i++) {
                oe->lowerstack[i].dentry = stack[i].dentry;
                oe->lowerstack[i].mnt = ufs->lower_mnt[i];
index cfdea47313a10e22a9c06193e4cc422891badaae..809048913889189d083339d1d015ef4cad2af035 100644 (file)
@@ -175,6 +175,13 @@ bool ovl_dentry_is_opaque(struct dentry *dentry)
        return oe->opaque;
 }
 
+bool ovl_dentry_is_impure(struct dentry *dentry)
+{
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       return oe->impure;
+}
+
 bool ovl_dentry_is_whiteout(struct dentry *dentry)
 {
        return !dentry->d_inode && ovl_dentry_is_opaque(dentry);
@@ -191,14 +198,7 @@ bool ovl_redirect_dir(struct super_block *sb)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
 
-       return ofs->config.redirect_dir;
-}
-
-void ovl_clear_redirect_dir(struct super_block *sb)
-{
-       struct ovl_fs *ofs = sb->s_fs_info;
-
-       ofs->config.redirect_dir = false;
+       return ofs->config.redirect_dir && !ofs->noxattr;
 }
 
 const char *ovl_dentry_get_redirect(struct dentry *dentry)
@@ -303,3 +303,59 @@ void ovl_copy_up_end(struct dentry *dentry)
        wake_up_locked(&ofs->copyup_wq);
        spin_unlock(&ofs->copyup_wq.lock);
 }
+
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name)
+{
+       int res;
+       char val;
+
+       if (!d_is_dir(dentry))
+               return false;
+
+       res = vfs_getxattr(dentry, name, &val, 1);
+       if (res == 1 && val == 'y')
+               return true;
+
+       return false;
+}
+
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+                      const char *name, const void *value, size_t size,
+                      int xerr)
+{
+       int err;
+       struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+
+       if (ofs->noxattr)
+               return xerr;
+
+       err = ovl_do_setxattr(upperdentry, name, value, size, 0);
+
+       if (err == -EOPNOTSUPP) {
+               pr_warn("overlayfs: cannot set %s xattr on upper\n", name);
+               ofs->noxattr = true;
+               return xerr;
+       }
+
+       return err;
+}
+
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
+{
+       int err;
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       if (oe->impure)
+               return 0;
+
+       /*
+        * Do not fail when upper doesn't support xattrs.
+        * Upper inodes won't have an origin or redirect xattr anyway.
+        */
+       err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE,
+                                "y", 1, 0);
+       if (!err)
+               oe->impure = true;
+
+       return err;
+}
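
Overlay flag xattrs like "impure" follow the single-byte 'y' convention that ovl_check_dir_xattr() tests for. From user space the same check looks like this (reading trusted.* names requires CAP_SYS_ADMIN; illustration only):

    #include <stdio.h>
    #include <sys/xattr.h>

    static int dir_has_flag(const char *dir, const char *name)
    {
            char val;

            /* exactly one byte, and it must be 'y' */
            return getxattr(dir, name, &val, 1) == 1 && val == 'y';
    }

    int main(int argc, char **argv)
    {
            if (argc != 2)
                    return 1;
            printf("impure: %d\n",
                   dir_has_flag(argv[1], "trusted.overlay.impure"));
            return 0;
    }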
index 45f6bf68fff3ed30df0f85d98c0f644c320a9bf7..f1e1927ccd484e7372fe2a38db7455468bbf06e8 100644 (file)
@@ -821,7 +821,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
        if (!mmget_not_zero(mm))
                goto free;
 
-       flags = write ? FOLL_WRITE : 0;
+       flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
 
        while (count > 0) {
                int this_len = min_t(int, count, PAGE_SIZE);
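
Restoring FOLL_FORCE here matters for debuggers that read /proc/<pid>/mem regardless of page protections; access is still gated by the usual ptrace checks. Reading one's own memory exercises the same path without extra privilege. A minimal sketch (on 32-bit, compile with -D_FILE_OFFSET_BITS=64 so pread covers the full address range):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int secret = 42, copy = 0;
            int fd = open("/proc/self/mem", O_RDONLY);

            if (fd < 0 || pread(fd, &copy, sizeof(copy),
                                (off_t)(uintptr_t)&secret) != sizeof(copy)) {
                    perror("/proc/self/mem");
                    return 1;
            }
            printf("read %d via /proc/self/mem\n", copy);
            return 0;
    }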
index da01f497180a165d163935c4744ef82521bf9151..39bb1e838d8da683fa64dadf7ff150b9369d699e 100644 (file)
@@ -1112,7 +1112,7 @@ static int flush_commit_list(struct super_block *s,
                depth = reiserfs_write_unlock_nested(s);
                if (reiserfs_barrier_flush(s))
                        __sync_dirty_buffer(jl->j_commit_bh,
-                                       REQ_PREFLUSH | REQ_FUA);
+                                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(jl->j_commit_bh);
                reiserfs_write_lock_nested(s, depth);
@@ -1271,7 +1271,7 @@ static int _update_journal_header_block(struct super_block *sb,
 
                if (reiserfs_barrier_flush(sb))
                        __sync_dirty_buffer(journal->j_header_bh,
-                                       REQ_PREFLUSH | REQ_FUA);
+                                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(journal->j_header_bh);
 
index 131b2b77c8185403dc3cdf0393e8e0c915f3a850..29ecaf739449c4036e6ed3ebed5499b8489079c4 100644 (file)
@@ -812,9 +812,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
        uspi->s_dirblksize = UFS_SECTOR_SIZE;
        super_block_offset=UFS_SBLOCK;
 
-       /* Keep 2Gig file limit. Some UFS variants need to override 
-          this but as I don't know which I'll let those in the know loosen
-          the rules */
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
+
        switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
        case UFS_MOUNT_UFSTYPE_44BSD:
                UFSD("ufstype=44bsd\n");
index f02eb767339219e8e59418429c666b3934d795f4..a7048eafa8e6d8c92a0c6888017389d8562c75ba 100644 (file)
@@ -1280,7 +1280,6 @@ xfs_bmap_read_extents(
                xfs_bmbt_rec_t  *frp;
                xfs_fsblock_t   nextbno;
                xfs_extnum_t    num_recs;
-               xfs_extnum_t    start;
 
                num_recs = xfs_btree_get_numrecs(block);
                if (unlikely(i + num_recs > room)) {
@@ -1303,7 +1302,6 @@ xfs_bmap_read_extents(
                 * Copy records into the extent records.
                 */
                frp = XFS_BMBT_REC_ADDR(mp, block, 1);
-               start = i;
                for (j = 0; j < num_recs; j++, i++, frp++) {
                        xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
                        trp->l0 = be64_to_cpu(frp->l0);
@@ -2065,8 +2063,10 @@ xfs_bmap_add_extent_delay_real(
                }
                temp = xfs_bmap_worst_indlen(bma->ip, temp);
                temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
-               diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
-                       (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+               diff = (int)(temp + temp2 -
+                            (startblockval(PREV.br_startblock) -
+                             (bma->cur ?
+                              bma->cur->bc_private.b.allocated : 0)));
                if (diff > 0) {
                        error = xfs_mod_fdblocks(bma->ip->i_mount,
                                                 -((int64_t)diff), false);
@@ -2123,7 +2123,6 @@ xfs_bmap_add_extent_delay_real(
                temp = da_new;
                if (bma->cur)
                        temp += bma->cur->bc_private.b.allocated;
-               ASSERT(temp <= da_old);
                if (temp < da_old)
                        xfs_mod_fdblocks(bma->ip->i_mount,
                                        (int64_t)(da_old - temp), false);
index 5392674bf8930550d949f6c01d0f47ec03228ba8..3a673ba201aae9f39c8190bfa2b8904120284ce5 100644 (file)
@@ -4395,7 +4395,7 @@ xfs_btree_visit_blocks(
                        xfs_btree_readahead_ptr(cur, ptr, 1);
 
                        /* save for the next iteration of the loop */
-                       lptr = *ptr;
+                       xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
                }
 
                /* for each buffer in the level */
index b177ef33cd4c3215de5f3bc228a467bcf73031e1..82a38d86ebad83346c441e802340a172402ef532 100644 (file)
@@ -1629,13 +1629,28 @@ xfs_refcount_recover_cow_leftovers(
        if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
                return -EOPNOTSUPP;
 
-       error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+       INIT_LIST_HEAD(&debris);
+
+       /*
+        * In this first part, we use an empty transaction to gather up
+        * all the leftover CoW extents so that we can subsequently
+        * delete them.  The empty transaction is used to avoid
+        * a buffer lock deadlock if there happens to be a loop in the
+        * refcountbt because we're allowed to re-grab a buffer that is
+        * already attached to our transaction.  When we're done
+        * recording the CoW debris we cancel the (empty) transaction
+        * and everything goes away cleanly.
+        */
+       error = xfs_trans_alloc_empty(mp, &tp);
        if (error)
                return error;
-       cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+
+       error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+       if (error)
+               goto out_trans;
+       cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
 
        /* Find all the leftover CoW staging extents. */
-       INIT_LIST_HEAD(&debris);
        memset(&low, 0, sizeof(low));
        memset(&high, 0, sizeof(high));
        low.rc.rc_startblock = XFS_REFC_COW_START;
@@ -1645,10 +1660,11 @@ xfs_refcount_recover_cow_leftovers(
        if (error)
                goto out_cursor;
        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
-       xfs_buf_relse(agbp);
+       xfs_trans_brelse(tp, agbp);
+       xfs_trans_cancel(tp);
 
        /* Now iterate the list to free the leftovers */
-       list_for_each_entry(rr, &debris, rr_list) {
+       list_for_each_entry_safe(rr, n, &debris, rr_list) {
                /* Set up transaction. */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
                if (error)
@@ -1676,8 +1692,16 @@ xfs_refcount_recover_cow_leftovers(
                error = xfs_trans_commit(tp);
                if (error)
                        goto out_free;
+
+               list_del(&rr->rr_list);
+               kmem_free(rr);
        }
 
+       return error;
+out_defer:
+       xfs_defer_cancel(&dfops);
+out_trans:
+       xfs_trans_cancel(tp);
 out_free:
        /* Free the leftover list */
        list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1688,11 +1712,6 @@ out_free:
 
 out_cursor:
        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
-       xfs_buf_relse(agbp);
-       goto out_free;
-
-out_defer:
-       xfs_defer_cancel(&dfops);
-       xfs_trans_cancel(tp);
-       goto out_free;
+       xfs_trans_brelse(tp, agbp);
+       goto out_trans;
 }
index 2b954308a1d671e9f09d06a5353713297f11be76..9e3cc2146d5b03cdc84a4a0d92ce3c186314525d 100644 (file)
@@ -582,9 +582,13 @@ xfs_getbmap(
                }
                break;
        default:
+               /* Local format data forks report no extents. */
+               if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+                       bmv->bmv_entries = 0;
+                       return 0;
+               }
                if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
-                   ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
-                   ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+                   ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
                        return -EINVAL;
 
                if (xfs_get_extsz_hint(ip) ||
@@ -712,7 +716,7 @@ xfs_getbmap(
                         * extents.
                         */
                        if (map[i].br_startblock == DELAYSTARTBLOCK &&
-                           map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+                           map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
                                ASSERT((iflags & BMV_IF_DELALLOC) != 0);
 
                         if (map[i].br_startblock == HOLESTARTBLOCK &&
index 62fa39276a24bd91c26e3aa18c9f162f19842b95..07b77b73b0240c5cca4187d29e8e403cbf4f0714 100644 (file)
@@ -97,12 +97,16 @@ static inline void
 xfs_buf_ioacct_inc(
        struct xfs_buf  *bp)
 {
-       if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+       if (bp->b_flags & XBF_NO_IOACCT)
                return;
 
        ASSERT(bp->b_flags & XBF_ASYNC);
-       bp->b_flags |= _XBF_IN_FLIGHT;
-       percpu_counter_inc(&bp->b_target->bt_io_count);
+       spin_lock(&bp->b_lock);
+       if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
+               bp->b_state |= XFS_BSTATE_IN_FLIGHT;
+               percpu_counter_inc(&bp->b_target->bt_io_count);
+       }
+       spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -110,14 +114,24 @@ xfs_buf_ioacct_inc(
  * freed and unaccount from the buftarg.
  */
 static inline void
-xfs_buf_ioacct_dec(
+__xfs_buf_ioacct_dec(
        struct xfs_buf  *bp)
 {
-       if (!(bp->b_flags & _XBF_IN_FLIGHT))
-               return;
+       ASSERT(spin_is_locked(&bp->b_lock));
 
-       bp->b_flags &= ~_XBF_IN_FLIGHT;
-       percpu_counter_dec(&bp->b_target->bt_io_count);
+       if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
+               bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+               percpu_counter_dec(&bp->b_target->bt_io_count);
+       }
+}
+
+static inline void
+xfs_buf_ioacct_dec(
+       struct xfs_buf  *bp)
+{
+       spin_lock(&bp->b_lock);
+       __xfs_buf_ioacct_dec(bp);
+       spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -149,9 +163,9 @@ xfs_buf_stale(
         * unaccounted (released to LRU) before that occurs. Drop in-flight
         * status now to preserve accounting consistency.
         */
-       xfs_buf_ioacct_dec(bp);
-
        spin_lock(&bp->b_lock);
+       __xfs_buf_ioacct_dec(bp);
+
        atomic_set(&bp->b_lru_ref, 0);
        if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
            (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -979,12 +993,12 @@ xfs_buf_rele(
                 * ensures the decrement occurs only once per-buf.
                 */
                if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
-                       xfs_buf_ioacct_dec(bp);
+                       __xfs_buf_ioacct_dec(bp);
                goto out_unlock;
        }
 
        /* the last reference has been dropped ... */
-       xfs_buf_ioacct_dec(bp);
+       __xfs_buf_ioacct_dec(bp);
        if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
                /*
                 * If the buffer is added to the LRU take a new reference to the
index 8d1d44f87ce98834ad67cee6840d96d07d5666c0..1508121f29f29191da1a4efc7c8f12cf42eb0107 100644 (file)
@@ -63,7 +63,6 @@ typedef enum {
 #define _XBF_KMEM       (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q   (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND   (1 << 23)/* compound buffer */
-#define _XBF_IN_FLIGHT  (1 << 25) /* I/O in flight, for accounting purposes */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -84,14 +83,14 @@ typedef unsigned int xfs_buf_flags_t;
        { _XBF_PAGES,           "PAGES" }, \
        { _XBF_KMEM,            "KMEM" }, \
        { _XBF_DELWRI_Q,        "DELWRI_Q" }, \
-       { _XBF_COMPOUND,        "COMPOUND" }, \
-       { _XBF_IN_FLIGHT,       "IN_FLIGHT" }
+       { _XBF_COMPOUND,        "COMPOUND" }
 
 
 /*
  * Internal state flags.
  */
 #define XFS_BSTATE_DISPOSE      (1 << 0)       /* buffer being discarded */
+#define XFS_BSTATE_IN_FLIGHT    (1 << 1)       /* I/O in flight */
 
 /*
  * The xfs_buftarg contains 2 notions of "sector size" -
index 35703a80137208b81cb38ab91ce50e28d80a19f2..5fb5a0958a1485db46a2f753446c11828ff1913e 100644 (file)
@@ -1043,49 +1043,17 @@ xfs_find_get_desired_pgoff(
 
        index = startoff >> PAGE_SHIFT;
        endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
-       end = endoff >> PAGE_SHIFT;
+       end = (endoff - 1) >> PAGE_SHIFT;
        do {
                int             want;
                unsigned        nr_pages;
                unsigned int    i;
 
-               want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+               want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          want);
-               /*
-                * No page mapped into given range.  If we are searching holes
-                * and if this is the first time we got into the loop, it means
-                * that the given offset is landed in a hole, return it.
-                *
-                * If we have already stepped through some block buffers to find
-                * holes but they all contains data.  In this case, the last
-                * offset is already updated and pointed to the end of the last
-                * mapped page, if it does not reach the endpoint to search,
-                * that means there should be a hole between them.
-                */
-               if (nr_pages == 0) {
-                       /* Data search found nothing */
-                       if (type == DATA_OFF)
-                               break;
-
-                       ASSERT(type == HOLE_OFF);
-                       if (lastoff == startoff || lastoff < endoff) {
-                               found = true;
-                               *offset = lastoff;
-                       }
-                       break;
-               }
-
-               /*
-                * At lease we found one page.  If this is the first time we
-                * step into the loop, and if the first page index offset is
-                * greater than the given search offset, a hole was found.
-                */
-               if (type == HOLE_OFF && lastoff == startoff &&
-                   lastoff < page_offset(pvec.pages[0])) {
-                       found = true;
+               if (nr_pages == 0)
                        break;
-               }
 
                for (i = 0; i < nr_pages; i++) {
                        struct page     *page = pvec.pages[i];
@@ -1098,18 +1066,18 @@ xfs_find_get_desired_pgoff(
                         * file mapping. However, page->index will not change
                         * because we have a reference on the page.
                         *
-                        * Searching done if the page index is out of range.
-                        * If the current offset is not reaches the end of
-                        * the specified search range, there should be a hole
-                        * between them.
+                        * If the current page offset is beyond where we've
+                        * ended, we've found a hole.
                         */
-                       if (page->index > end) {
-                               if (type == HOLE_OFF && lastoff < endoff) {
-                                       *offset = lastoff;
-                                       found = true;
-                               }
+                       if (type == HOLE_OFF && lastoff < endoff &&
+                           lastoff < page_offset(pvec.pages[i])) {
+                               found = true;
+                               *offset = lastoff;
                                goto out;
                        }
+                       /* Searching done if the page index is out of range. */
+                       if (page->index > end)
+                               goto out;
 
                        lock_page(page);
                        /*
@@ -1151,21 +1119,20 @@ xfs_find_get_desired_pgoff(
 
                /*
                 * The number of returned pages less than our desired, search
-                * done.  In this case, nothing was found for searching data,
-                * but we found a hole behind the last offset.
+                * done.
                 */
-               if (nr_pages < want) {
-                       if (type == HOLE_OFF) {
-                               *offset = lastoff;
-                               found = true;
-                       }
+               if (nr_pages < want)
                        break;
-               }
 
                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);
 
+       /* No page at lastoff and we are not done - we found a hole. */
+       if (type == HOLE_OFF && lastoff < endoff) {
+               *offset = lastoff;
+               found = true;
+       }
 out:
        pagevec_release(&pvec);
        return found;
index 3683819887a5658eff23d6e3258d21567bbdc0b4..814ed729881d9a4305c3dd5646d75ef0f112b87b 100644 (file)
@@ -828,6 +828,7 @@ xfs_getfsmap(
        struct xfs_fsmap                dkeys[2];       /* per-dev keys */
        struct xfs_getfsmap_dev         handlers[XFS_GETFSMAP_DEVS];
        struct xfs_getfsmap_info        info = { NULL };
+       bool                            use_rmap;
        int                             i;
        int                             error = 0;
 
@@ -837,12 +838,14 @@ xfs_getfsmap(
            !xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1]))
                return -EINVAL;
 
+       use_rmap = capable(CAP_SYS_ADMIN) &&
+                  xfs_sb_version_hasrmapbt(&mp->m_sb);
        head->fmh_entries = 0;
 
        /* Set up our device handlers. */
        memset(handlers, 0, sizeof(handlers));
        handlers[0].dev = new_encode_dev(mp->m_ddev_targp->bt_dev);
-       if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+       if (use_rmap)
                handlers[0].fn = xfs_getfsmap_datadev_rmapbt;
        else
                handlers[0].fn = xfs_getfsmap_datadev_bnobt;
index c0bd0d7651a947bf06407d7c680dde5c377b9b26..bb837310c07e98c529472abceda0a8b426d8c9a5 100644 (file)
@@ -913,4 +913,55 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux);
 int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
 int drm_dp_stop_crc(struct drm_dp_aux *aux);
 
+struct drm_dp_dpcd_ident {
+       u8 oui[3];
+       u8 device_id[6];
+       u8 hw_rev;
+       u8 sw_major_rev;
+       u8 sw_minor_rev;
+} __packed;
+
+/**
+ * struct drm_dp_desc - DP branch/sink device descriptor
+ * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
+ * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
+ */
+struct drm_dp_desc {
+       struct drm_dp_dpcd_ident ident;
+       u32 quirks;
+};
+
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+                    bool is_branch);
+
+/**
+ * enum drm_dp_quirk - Display Port sink/branch device specific quirks
+ *
+ * Display Port sink and branch devices in the wild have a variety of bugs; try
+ * to collect them here. The quirks are shared, but it's up to the drivers to
+ * implement workarounds for them.
+ */
+enum drm_dp_quirk {
+       /**
+        * @DP_DPCD_QUIRK_LIMITED_M_N:
+        *
+        * The device requires main link attributes Mvid and Nvid to be limited
+        * to 16 bits.
+        */
+       DP_DPCD_QUIRK_LIMITED_M_N,
+};
+
+/**
+ * drm_dp_has_quirk() - does the DP device have a specific quirk
+ * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @quirk: Quirk to query for
+ *
+ * Return true if DP device identified by @desc has @quirk.
+ */
+static inline bool
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+{
+       return desc->quirks & BIT(quirk);
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
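
A driver would typically read the descriptor once at link setup and branch on the quirk afterwards. A hedged sketch, assuming drm_dp_read_desc() returns 0 on success (the function name below is hypothetical):

    static bool sink_needs_limited_m_n(struct drm_dp_aux *aux, bool is_branch)
    {
            struct drm_dp_desc desc;

            if (drm_dp_read_desc(aux, &desc, is_branch) < 0)
                    return false;   /* no descriptor, assume no quirk */
            return drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N);
    }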
index c47aa248c640bc5ea806d5112ce7f7b5fb6b7a46..fcd641032f8d3a87162b0e9f1a63e08ae16d10df 100644 (file)
@@ -238,7 +238,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 bool blk_mq_queue_stopped(struct request_queue *q);
index aa2e19182d990eedda70de69bf638db5856811d1..51c5bd64bd0022c8a3f197d846c23448e7bb6a6e 100644 (file)
@@ -3,6 +3,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/string.h>
+
 #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
 
 /*
  */
 
 # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
-extern const char *ceph_file_part(const char *s, int len);
 #  define dout(fmt, ...)                                               \
        pr_debug("%.*s %12.12s:%-4d : " fmt,                            \
                 8 - (int)sizeof(KBUILD_MODNAME), "    ",               \
-                ceph_file_part(__FILE__, sizeof(__FILE__)),            \
-                __LINE__, ##__VA_ARGS__)
+                kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
 # else
 /* faux printk call just to see any compiler warnings. */
 #  define dout(fmt, ...)       do {                            \
index 21745946cae154f53cd87311e9350465388f70a5..ec47101cb1bf80f0867dbcff1d6aa10878df7418 100644 (file)
@@ -48,6 +48,7 @@ enum {
        CSS_ONLINE      = (1 << 1), /* between ->css_online() and ->css_offline() */
        CSS_RELEASED    = (1 << 2), /* refcnt reached zero, released */
        CSS_VISIBLE     = (1 << 3), /* css is visible to userland */
+       CSS_DYING       = (1 << 4), /* css is dying */
 };
 
 /* bits in struct cgroup flags field */
index ed2573e149faf070714e04f8160f7056b2fb1d3e..710a005c6b7a652bb9c32b5457dbd64196f6f4a0 100644 (file)
@@ -343,6 +343,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
        return true;
 }
 
+/**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline.  In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal.  cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed.  If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+       return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
+
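
A typical consumer is a control-file write handler that must not apply changes to a group whose rmdir has already started. A hedged sketch modeled on that use case (function name hypothetical):

    static int cs_write_setting(struct cgroup_subsys_state *css)
    {
            /* rmdir has begun; ->css_offline() may not have run yet */
            if (css_is_dying(css))
                    return -ENODEV;
            /* ... apply the new setting under the usual locks ... */
            return 0;
    }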
 /**
  * css_put - put a css reference
  * @css: target css
index de179993e039d41d7e9034b5744be40954f81c09..ea9126006a69f94c7b8ea1705bf43bdf7b48e16c 100644 (file)
  * with any version that can compile the kernel
  */
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+/*
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function.  This turns out to avoid the need for complex #ifdef
+ * directives.  Suppress the warning in clang as well.
+ */
+#define inline inline __attribute__((unused))
index 56197f82af45fb0f7cf492961fe1eb1956105735..62d948f80730fdd94c587b4f0235c72cf5d3399a 100644 (file)
@@ -272,6 +272,16 @@ struct bpf_prog_aux;
                .off   = OFF,                                   \
                .imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)                                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_JA,                      \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)                                    \
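
Kernel code that assembles programs from these macros (lib/test_bpf.c style) can use BPF_JMP_A to skip instructions unconditionally. A small sketch:

    static const struct bpf_insn sketch[] = {
            BPF_MOV64_IMM(BPF_REG_0, 1),
            BPF_JMP_A(1),                   /* jump over the next insn */
            BPF_MOV64_IMM(BPF_REG_0, 2),    /* skipped */
            BPF_EXIT_INSN(),                /* program returns 1 */
    };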
index 2b1a44f5bdb60e6a28d874630f71dec7a0f4c40b..a89d37e8b3873cc8e6fa79389a6c0d3a2f4843ab 100644 (file)
@@ -41,7 +41,7 @@ struct vm_area_struct;
 #define ___GFP_WRITE           0x800000u
 #define ___GFP_KSWAPD_RECLAIM  0x1000000u
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP       0x4000000u
+#define ___GFP_NOLOCKDEP       0x2000000u
 #else
 #define ___GFP_NOLOCKDEP       0
 #endif
index c0d712d22b079ebc16129ef2618f41762276cf5e..f738d50cc17d3fcaa0b9b7cf681b70dd2646897d 100644 (file)
@@ -56,7 +56,14 @@ struct gpiod_lookup_table {
        .flags = _flags,                                                  \
 }
 
+#ifdef CONFIG_GPIOLIB
 void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
 void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
+#else
+static inline
+void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
+static inline
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
+#endif
 
 #endif /* __LINUX_GPIO_MACHINE_H */
index 8d5fcd6284ce0f4702d9eb28703bf2ae80d7e399..283dc2f5364d77c13df5a16ed0474154ee6724ff 100644 (file)
@@ -614,14 +614,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
 static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
                                                    netdev_features_t features)
 {
-       if (skb_vlan_tagged_multi(skb))
-               features = netdev_intersect_features(features,
-                                                    NETIF_F_SG |
-                                                    NETIF_F_HIGHDMA |
-                                                    NETIF_F_FRAGLIST |
-                                                    NETIF_F_HW_CSUM |
-                                                    NETIF_F_HW_VLAN_CTAG_TX |
-                                                    NETIF_F_HW_VLAN_STAG_TX);
+       if (skb_vlan_tagged_multi(skb)) {
+               /* In the case of multi-tagged packets, use a direct mask
+                * instead of using netdev_interesect_features(), to make
+                * sure that only devices supporting NETIF_F_HW_CSUM will
+                * have checksum offloading support.
+                */
+               features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+                           NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_STAG_TX;
+       }
 
        return features;
 }
index 36872fbb815d72203e14582e3dab6ba5051ee8a7..734377ad42e9f0e4719edc4750b57245fa2d3f17 100644 (file)
@@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
 
+#ifndef __jiffy_arch_data
+#define __jiffy_arch_data
+#endif
+
 /*
  * The 64-bit value is not atomic - you MUST NOT read it
  * without sampling the sequence number in jiffies_lock.
  * get_jiffies_64() will do this for you as appropriate.
  */
 extern u64 __cacheline_aligned_in_smp jiffies_64;
-extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
+extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void);
index 4ce24a3762627be20e805da3eaab26ba1bc47578..8098695e5d8d9dfba3815fd359823f92833a5d61 100644 (file)
@@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
 }
 #endif
 
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+               phys_addr_t end_addr);
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
        return 0;
 }
 
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+               phys_addr_t end_addr)
+{
+       return 0;
+}
+
 #endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
index b4ee8f62ce8da82720cb79e7a9ea3ec97703b849..8e2828d48d7fcf6a7ba683bf51c8c91a1ad28ec0 100644 (file)
@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
        u16     rate_val;
 };
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params);
index dd9a263ed368d5476b06a3464bbfada0436cf6e2..a940ec6a046cfd0dc8c3016226c71ea9e2c343e5 100644 (file)
@@ -787,8 +787,14 @@ enum {
 };
 
 enum {
-       CQE_RSS_HTYPE_IP        = 0x3 << 6,
-       CQE_RSS_HTYPE_L4        = 0x3 << 2,
+       CQE_RSS_HTYPE_IP        = 0x3 << 2,
+       /* cqe->rss_hash_type[3:2] - IP destination selected for hash
+        * (00 = none,  01 = IPv4, 10 = IPv6, 11 = Reserved)
+        */
+       CQE_RSS_HTYPE_L4        = 0x3 << 6,
+       /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
+        * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
+        */
 };
 
 enum {
index bcdf739ee41a2cf38fe537e48172e9ca9053e3b9..93273d9ea4d145f125846f0c9a56a5a6e74915e4 100644 (file)
@@ -787,7 +787,12 @@ enum {
 
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 
+enum {
+       MLX5_CMD_ENT_STATE_PENDING_COMP,
+};
+
 struct mlx5_cmd_work_ent {
+       unsigned long           state;
        struct mlx5_cmd_msg    *in;
        struct mlx5_cmd_msg    *out;
        void                   *uout;
@@ -976,7 +981,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name,
index 32de0724b40009adc2a802dcc5abafbb5505c0b1..edafedb7b509010c904fccf2cdeffea1afa9317b 100644 (file)
@@ -766,6 +766,12 @@ enum {
        MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
 
+enum {
+       MLX5_CAP_UMR_FENCE_STRONG       = 0x0,
+       MLX5_CAP_UMR_FENCE_SMALL        = 0x1,
+       MLX5_CAP_UMR_FENCE_NONE         = 0x2,
+};
+
 struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_0[0x80];
 
@@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_202[0x1];
        u8         ipoib_enhanced_offloads[0x1];
        u8         ipoib_basic_offloads[0x1];
-       u8         reserved_at_205[0xa];
+       u8         reserved_at_205[0x5];
+       u8         umr_fence[0x2];
+       u8         reserved_at_20c[0x3];
        u8         drain_sigerr[0x1];
        u8         cmdif_checksum[0x2];
        u8         sigerr_cqe[0x1];
index 7cb17c6b97de38b1e8d55ca6d8c90b8415a9c3fa..b892e95d4929d311b51877dfb9eb3de67780bbdf 100644 (file)
@@ -2327,6 +2327,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
 #define FOLL_COW       0x4000  /* internal GUP flag */
 
+static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+{
+       if (vm_fault & VM_FAULT_OOM)
+               return -ENOMEM;
+       if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+               return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
+       if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+               return -EFAULT;
+       return 0;
+}
+
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
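
The helper centralizes the fault-bits-to-errno mapping that get_user_pages()-style callers previously open-coded. A hedged sketch of the intended call pattern (simplified; not the full faultin logic):

    static int faultin_one(struct vm_area_struct *vma, unsigned long addr,
                           unsigned int fault_flags, int foll_flags)
    {
            int ret = handle_mm_fault(vma, addr, fault_flags);

            if (ret & VM_FAULT_ERROR)
                    return vm_fault_to_errno(ret, foll_flags);
            return 0;
    }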
index ebaccd4e7d8cdc5f5ef13fed1475a8b202d93628..ef6a13b7bd3e851385bea32434e207a5cf6eec7f 100644 (file)
@@ -678,6 +678,7 @@ typedef struct pglist_data {
         * is the first PFN that needs to be initialised.
         */
        unsigned long first_deferred_pfn;
+       unsigned long static_init_size;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 566fda587fcf7a76af1c6a01e84ce33336606f3f..3f74ef2281e8afac1e4667b4fbf4abcc5f89ff17 100644 (file)
@@ -467,6 +467,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
+       DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
        DMI_BOARD_VERSION,
index be378cf47fcc93fa2c89c6cd870f0a9fecd65211..b3044c2c62cbe8e856605129aa17f111e2a21493 100644 (file)
@@ -294,7 +294,7 @@ int xt_match_to_user(const struct xt_entry_match *m,
 int xt_target_to_user(const struct xt_entry_target *t,
                      struct xt_entry_target __user *u);
 int xt_data_to_user(void __user *dst, const void *src,
-                   int usersize, int size);
+                   int usersize, int size, int aligned_size);
 
 void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                                 struct xt_counters_info *info, bool compat);
index a30efb437e6d1cfa42a75ec72997bf12b9cdf59d..e0cbf17af780e1d3e4c2be6bba351c9a27cebf88 100644 (file)
@@ -125,4 +125,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
 /* True if the target is not a standard target */
 #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
 
+static inline bool ebt_invalid_target(int target)
+{
+       return (target < -NUM_STANDARD_TARGETS || target >= 0);
+}
+
 #endif
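
The helper packages the verdict range check for individual targets' checkentry hooks. A hedged sketch (struct and function names hypothetical):

    struct ebt_foo_info {           /* hypothetical target info */
            int target;             /* verdict supplied by userspace */
    };

    static int ebt_foo_check(const struct ebt_foo_info *info)
    {
            /* valid verdicts are -NUM_STANDARD_TARGETS..-1 */
            return ebt_invalid_target(info->target) ? -EINVAL : 0;
    }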
index dc8224ae28d5d9e6106dc0d06a8a9bc2eb85fd0a..e0d1946270f38e5238ddf0a3bb25cf03c3f3ebe4 100644 (file)
@@ -64,6 +64,7 @@ extern struct platform_device *of_platform_device_create(struct device_node *np,
                                                   const char *bus_id,
                                                   struct device *parent);
 
+extern int of_platform_device_destroy(struct device *dev, void *data);
 extern int of_platform_bus_probe(struct device_node *root,
                                 const struct of_device_id *matches,
                                 struct device *parent);
index 33c2b0b77429d09aaa31cb9735458db5762e08a7..8039f9f0ca054ba20fd9992b13b7926859d029b5 100644 (file)
@@ -183,6 +183,11 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
        /* Do not use FLR even if device advertises PCI_AF_CAP */
        PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
+       /*
+        * Resume before calling the driver's system suspend hooks, disabling
+        * the direct_complete optimization.
+        */
+       PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
 };
 
 enum pci_irq_reroute_variant {
@@ -1342,9 +1347,9 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
                               unsigned int max_vecs, unsigned int flags,
                               const struct irq_affinity *aff_desc)
 {
-       if (min_vecs > 1)
-               return -EINVAL;
-       return 1;
+       if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
+               return 1;
+       return -ENOSPC;
 }
 
 static inline void pci_free_irq_vectors(struct pci_dev *dev)
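
With this stub, a !CONFIG_PCI_MSI kernel succeeds only when the caller actually asked for the legacy interrupt and one exists, instead of always pretending one vector was allocated. A driver-side sketch of the usual fallback request (foo_ names hypothetical):

    static int foo_setup_irq(struct pci_dev *pdev)
    {
            int nvec = pci_alloc_irq_vectors(pdev, 1, 4,
                                             PCI_IRQ_MSIX | PCI_IRQ_MSI |
                                             PCI_IRQ_LEGACY);
            if (nvec < 0)
                    return nvec;
            /* vector 0 is valid for MSI-X, MSI or INTx alike */
            return pci_irq_vector(pdev, 0);
    }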
index 279e3c5326e3a4e65bf6f86556ea984a69d0ca9c..7620eb127cffc5edbc475457732a042bac357055 100644 (file)
@@ -42,8 +42,6 @@
  * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
  *     impedance to VDD). If the argument is != 0 pull-up is enabled,
  *     if it is 0, pull-up is total, i.e. the pin is connected to VDD.
- * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous
- *     input and output operations.
  * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
  *     collector) which means it is usually wired with other output ports
  *     which are then pulled up with an external resistor. Setting this
@@ -98,7 +96,6 @@ enum pin_config_param {
        PIN_CONFIG_BIAS_PULL_DOWN,
        PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
        PIN_CONFIG_BIAS_PULL_UP,
-       PIN_CONFIG_BIDIRECTIONAL,
        PIN_CONFIG_DRIVE_OPEN_DRAIN,
        PIN_CONFIG_DRIVE_OPEN_SOURCE,
        PIN_CONFIG_DRIVE_PUSH_PULL,
index 422bc2e4cb6a6fc47571d28cb0602d59477d7caf..ef3eb8bbfee482e04aa06c83b21fbc2ce02d50b3 100644 (file)
@@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request,
                          unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
 extern void __ptrace_link(struct task_struct *child,
-                         struct task_struct *new_parent);
+                         struct task_struct *new_parent,
+                         const struct cred *ptracer_cred);
 extern void __ptrace_unlink(struct task_struct *child);
 extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
 #define PTRACE_MODE_READ       0x01
@@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
 
        if (unlikely(ptrace) && current->ptrace) {
                child->ptrace = current->ptrace;
-               __ptrace_link(child, current->parent);
+               __ptrace_link(child, current->parent, current->ptracer_cred);
 
                if (child->ptrace & PT_SEIZED)
                        task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
@@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
 
                set_tsk_thread_flag(child, TIF_SIGPENDING);
        }
+       else
+               child->ptracer_cred = NULL;
 }
 
 /**
index cda76c6506ca411c42c5e1cdf6d287263d97dde6..e69402d4a8aecbdc6fe4a40e8ad7e01957badbd6 100644 (file)
@@ -195,6 +195,7 @@ int serdev_device_open(struct serdev_device *);
 void serdev_device_close(struct serdev_device *);
 unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
 void serdev_device_set_flow_control(struct serdev_device *, bool);
+int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
 void serdev_device_wait_until_sent(struct serdev_device *, long);
 int serdev_device_get_tiocm(struct serdev_device *);
 int serdev_device_set_tiocm(struct serdev_device *, int, int);
@@ -236,6 +237,12 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
        return 0;
 }
 static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
+static inline int serdev_device_write_buf(struct serdev_device *serdev,
+                                         const unsigned char *buf,
+                                         size_t count)
+{
+       return -ENODEV;
+}
 static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
 static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
 {
@@ -301,7 +308,7 @@ struct tty_driver;
 struct device *serdev_tty_port_register(struct tty_port *port,
                                        struct device *parent,
                                        struct tty_driver *drv, int idx);
-void serdev_tty_port_unregister(struct tty_port *port);
+int serdev_tty_port_unregister(struct tty_port *port);
 #else
 static inline struct device *serdev_tty_port_register(struct tty_port *port,
                                           struct device *parent,
@@ -309,14 +316,10 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port,
 {
        return ERR_PTR(-ENODEV);
 }
-static inline void serdev_tty_port_unregister(struct tty_port *port) {}
-#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
-
-static inline int serdev_device_write_buf(struct serdev_device *serdev,
-                                         const unsigned char *data,
-                                         size_t count)
+static inline int serdev_tty_port_unregister(struct tty_port *port)
 {
-       return serdev_device_write(serdev, data, count, 0);
+       return -ENODEV;
 }
+#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
 
 #endif /*_LINUX_SERDEV_H */
index 94631026f79c56f022976a85dcde92379507e87c..11cef5a7bc87a9fe67a4bfbfca9f52e41d0abd88 100644 (file)
@@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
 {
        char *cp = (char *)p;
        struct kvec *vec = &rqstp->rq_arg.head[0];
-       return cp == (char *)vec->iov_base + vec->iov_len;
+       return cp >= (char *)vec->iov_base &&
+              cp <= (char *)vec->iov_base + vec->iov_len;
 }
 
 static inline int
index 0b1cf32edfd7ba1c456252124e23c68450d5bcc3..d9718378a8bee0b327d08c2e80a6fd3b5490b967 100644 (file)
@@ -189,8 +189,6 @@ struct platform_suspend_ops {
 struct platform_freeze_ops {
        int (*begin)(void);
        int (*prepare)(void);
-       void (*wake)(void);
-       void (*sync)(void);
        void (*restore)(void);
        void (*end)(void);
 };
@@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq;
 
 extern bool pm_wakeup_pending(void);
 extern void pm_system_wakeup(void);
-extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(void);
 extern void pm_system_irq_wakeup(unsigned int irq_number);
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 
 static inline bool pm_wakeup_pending(void) { return false; }
 static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(bool reset) {}
+static inline void pm_wakeup_clear(void) {}
 static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
 
 static inline void lock_system_sleep(void) {}
index d07cd2105a6c6a3bdcac20c918622c7ad4ea0840..eccb4ec30a8a7b94064ff1ede0645234b94ae49c 100644 (file)
@@ -558,6 +558,15 @@ extern struct device *tty_port_register_device_attr(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
                struct device *device, void *drvdata,
                const struct attribute_group **attr_grp);
+extern struct device *tty_port_register_device_serdev(struct tty_port *port,
+               struct tty_driver *driver, unsigned index,
+               struct device *device);
+extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
+               struct tty_driver *driver, unsigned index,
+               struct device *device, void *drvdata,
+               const struct attribute_group **attr_grp);
+extern void tty_port_unregister_device(struct tty_port *port,
+               struct tty_driver *driver, unsigned index);
 extern int tty_port_alloc_xmit_buf(struct tty_port *port);
 extern void tty_port_free_xmit_buf(struct tty_port *port);
 extern void tty_port_destroy(struct tty_port *port);
index 7dffa5624ea62bee992e547c44ed4a86a577b0a7..97116379db5ff6a850e078c35047fe995f45b265 100644 (file)
@@ -206,6 +206,7 @@ struct cdc_state {
 };
 
 extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
+extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf);
 extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
 extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
 extern void usbnet_cdc_status(struct usbnet *, struct urb *);
index eb50ce54b759154b99cf333e177adf8ca229c69b..413335c8cb529a8506a2f934577c3413512d8c97 100644 (file)
@@ -29,7 +29,7 @@ struct edid;
 struct cec_adapter;
 struct cec_notifier;
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
 
 /**
  * cec_notifier_get - find or create a new cec_notifier for the given device.
index b8eb895731d561a5f0ff4ec218055581ad6e21ee..bfa88d4d67e1d6663da4952a6a097e32f5e573a8 100644 (file)
@@ -173,7 +173,7 @@ struct cec_adapter {
        bool passthrough;
        struct cec_log_addrs log_addrs;
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
        struct cec_notifier *notifier;
 #endif
 
@@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
  */
 int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 void cec_register_cec_notifier(struct cec_adapter *adap,
                               struct cec_notifier *notifier);
 #endif
index 049af33da3b6c95897d544670cea65c542317673..cfc0437841665d7ed46a714915c50d723c24901c 100644 (file)
@@ -107,10 +107,16 @@ struct dst_entry {
        };
 };
 
+struct dst_metrics {
+       u32             metrics[RTAX_MAX];
+       atomic_t        refcnt;
+};
+extern const struct dst_metrics dst_default_metrics;
+
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY          0x1UL
+#define DST_METRICS_REFCOUNTED         0x2UL
 #define DST_METRICS_FLAGS              0x3UL
 #define __DST_METRICS_PTR(Y)   \
        ((u32 *)((Y) & ~DST_METRICS_FLAGS))
index 6692c5758b332d468f1e0611ecc4f3e03ae03b2b..f7f6aa789c6174c41ca9739206d586c559c1f3a1 100644 (file)
@@ -114,11 +114,11 @@ struct fib_info {
        __be32                  fib_prefsrc;
        u32                     fib_tb_id;
        u32                     fib_priority;
-       u32                     *fib_metrics;
-#define fib_mtu fib_metrics[RTAX_MTU-1]
-#define fib_window fib_metrics[RTAX_WINDOW-1]
-#define fib_rtt fib_metrics[RTAX_RTT-1]
-#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
+       struct dst_metrics      *fib_metrics;
+#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
+#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
        int                     fib_nhs;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        int                     fib_weight;
index dbf0abba33b8da21be05abf6e719f69542da80fc..3e505bbff8ca4a41f8d39fefcd59aa01b85424f4 100644 (file)
@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
  */
 extern const struct proto_ops inet6_stream_ops;
 extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
 
 struct group_source_req;
 struct group_filter;
index e04fa7691e5d6873cd04e94816227d4a41275fa2..c519bb5b5bb8806089886caacaca4846fe3eeb98 100644 (file)
@@ -9,6 +9,7 @@
 
 #ifndef _NF_CONNTRACK_HELPER_H
 #define _NF_CONNTRACK_HELPER_H
+#include <linux/refcount.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_expect.h>
@@ -26,6 +27,7 @@ struct nf_conntrack_helper {
        struct hlist_node hnode;        /* Internal use. */
 
        char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */
+       refcount_t refcnt;
        struct module *me;              /* pointer to self */
        const struct nf_conntrack_expect_policy *expect_policy;
 
@@ -79,6 +81,8 @@ struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
 struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name,
                                                               u16 l3num,
                                                               u8 protonum);
+void nf_conntrack_helper_put(struct nf_conntrack_helper *helper);
+
 void nf_ct_helper_init(struct nf_conntrack_helper *helper,
                       u16 l3num, u16 protonum, const char *name,
                       u16 default_port, u16 spec_port, u32 id,
index 028faec8fc2799b0c176ac88b54fc6700e89f896..8a8bab8d7b15a8e9c746a899dcf474740e9f6f25 100644 (file)
@@ -176,7 +176,7 @@ struct nft_data_desc {
 int nft_data_init(const struct nft_ctx *ctx,
                  struct nft_data *data, unsigned int size,
                  struct nft_data_desc *desc, const struct nlattr *nla);
-void nft_data_uninit(const struct nft_data *data, enum nft_data_types type);
+void nft_data_release(const struct nft_data *data, enum nft_data_types type);
 int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
                  enum nft_data_types type, unsigned int len);
 
index f31fb6331a532eed7258177d5cfbb48fde9a594a..3248beaf16b0513283cd3c863695a77d09793cdc 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <net/act_api.h>
+#include <linux/tc_act/tc_csum.h>
 
 struct tcf_csum {
        struct tc_action common;
@@ -11,4 +12,18 @@ struct tcf_csum {
 };
 #define to_tcf_csum(a) ((struct tcf_csum *)a)
 
+static inline bool is_tcf_csum(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       if (a->ops && a->ops->type == TCA_ACT_CSUM)
+               return true;
+#endif
+       return false;
+}
+
+static inline u32 tcf_csum_update_flags(const struct tc_action *a)
+{
+       return to_tcf_csum(a)->update_flags;
+}
+
 #endif /* __NET_TC_CSUM_H */
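is_tcf_csum() is the usual guard-then-downcast pattern: check the ops type tag on the base tc_action before casting to the containing structure. A standalone sketch of that pattern; the types here are stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum act_type { ACT_CSUM = 1, ACT_MIRRED = 2 };

struct act_ops { enum act_type type; };
struct tc_act  { const struct act_ops *ops; };

/* The derived action embeds the base as its first member, so a base
 * pointer may be cast back once the type tag has been verified. */
struct csum_act {
        struct tc_act common;
        unsigned int update_flags;
};

static bool is_csum(const struct tc_act *a)
{
        return a->ops && a->ops->type == ACT_CSUM;
}

int main(void)
{
        static const struct act_ops csum_ops = { ACT_CSUM };
        struct csum_act c = { { &csum_ops }, 0x5 };
        const struct tc_act *a = &c.common;

        if (is_csum(a))
                printf("update_flags: 0x%x\n",
                       ((const struct csum_act *)a)->update_flags);
        return 0;
}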
index 38a7427ae902e35973a8b7fa0e95ff602ede0e87..be6223c586fa05b3ef1dbcb96e73cf7b5dae292d 100644 (file)
@@ -924,7 +924,7 @@ struct tcp_congestion_ops {
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        /* call when ack arrives (optional) */
        void (*in_ack_event)(struct sock *sk, u32 flags);
-       /* new value of cwnd after loss (optional) */
+       /* new value of cwnd after loss (required) */
        u32  (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
        void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
index 6793a30c66b1f054516a27229c29037fcdfa2f6f..7e7e2b0d29157047fa0d3596b3f97cf501d754f9 100644 (file)
@@ -979,10 +979,6 @@ struct xfrm_dst {
        struct flow_cache_object flo;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        int num_pols, num_xfrms;
-#ifdef CONFIG_XFRM_SUB_POLICY
-       struct flowi *origin;
-       struct xfrm_selector *partner;
-#endif
        u32 xfrm_genid;
        u32 policy_genid;
        u32 route_mtu_cached;
@@ -998,12 +994,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
        dst_release(xdst->route);
        if (likely(xdst->u.dst.xfrm))
                xfrm_state_put(xdst->u.dst.xfrm);
-#ifdef CONFIG_XFRM_SUB_POLICY
-       kfree(xdst->origin);
-       xdst->origin = NULL;
-       kfree(xdst->partner);
-       xdst->partner = NULL;
-#endif
 }
 #endif
 
index f5f70e345318151356e022ce46b623a357031920..355b81f4242defd82a3802cd47e2a28abfb24ee5 100644 (file)
@@ -158,7 +158,6 @@ enum sa_path_rec_type {
 };
 
 struct sa_path_rec_ib {
-       __be64       service_id;
        __be16       dlid;
        __be16       slid;
        u8           raw_traffic;
@@ -174,7 +173,6 @@ struct sa_path_rec_roce {
 };
 
 struct sa_path_rec_opa {
-       __be64       service_id;
        __be32       dlid;
        __be32       slid;
        u8           raw_traffic;
@@ -189,6 +187,7 @@ struct sa_path_rec_opa {
 struct sa_path_rec {
        union ib_gid dgid;
        union ib_gid sgid;
+       __be64       service_id;
        /* reserved */
        __be32       flow_label;
        u8           hop_limit;
@@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib,
                ib->ib.dlid     = htons(ntohl(opa->opa.dlid));
                ib->ib.slid     = htons(ntohl(opa->opa.slid));
        }
-       ib->ib.service_id       = opa->opa.service_id;
+       ib->service_id          = opa->service_id;
        ib->ib.raw_traffic      = opa->opa.raw_traffic;
 }
 
@@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa,
        }
        opa->opa.slid           = slid;
        opa->opa.dlid           = dlid;
-       opa->opa.service_id     = ib->ib.service_id;
+       opa->service_id         = ib->service_id;
        opa->opa.raw_traffic    = ib->ib.raw_traffic;
 }
 
@@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
                (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
 }
 
-static inline void sa_path_set_service_id(struct sa_path_rec *rec,
-                                         __be64 service_id)
-{
-       if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-               rec->ib.service_id = service_id;
-       else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-               rec->opa.service_id = service_id;
-}
-
 static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
 {
        if (rec->rec_type == SA_PATH_REC_TYPE_IB)
@@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
                rec->opa.raw_traffic = raw_traffic;
 }
 
-static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec)
-{
-       if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-               return rec->ib.service_id;
-       else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-               return rec->opa.service_id;
-       return 0;
-}
-
 static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
 {
        if (rec->rec_type == SA_PATH_REC_TYPE_IB)
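The hunks above hoist service_id out of the per-type sa_path_rec_ib/sa_path_rec_opa variants into the common sa_path_rec, which lets the per-type sa_path_{get,set}_service_id switch helpers disappear. A small sketch of the refactor, assuming hypothetical types:

#include <stdint.h>
#include <stdio.h>

enum rec_type { REC_IB, REC_OPA };

/* A field identical across all variants belongs in the outer struct,
 * not duplicated inside each union member. */
struct path_rec {
        enum rec_type type;
        uint64_t service_id;            /* hoisted common field */
        union {
                struct { uint16_t dlid, slid; } ib;
                struct { uint32_t dlid, slid; } opa;
        };
};

int main(void)
{
        struct path_rec r = { .type = REC_OPA, .service_id = 42 };

        r.opa.dlid = 7;
        /* No per-type getter/setter switch is needed any more. */
        printf("service_id=%llu dlid=%u\n",
               (unsigned long long)r.service_id, r.opa.dlid);
        return 0;
}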
index 5852661443290d52eb8a3e719716452d752b6eaa..348c102cb5f6afdbe9f90a7c788431bac0219226 100644 (file)
@@ -10,9 +10,6 @@ struct ibnl_client_cbs {
        struct module *module;
 };
 
-int ibnl_init(void);
-void ibnl_cleanup(void);
-
 /**
  * Add a client to the list of IB netlink exporters.
  * @index: Index of the added client
@@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
                        unsigned int group, gfp_t flags);
 
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
 #endif /* _RDMA_NETLINK_H */
index 275581d483ddd90d97c550ee8bf44d705833ecf8..5f17fb770477bbdfa2729a7b35cf21f70493515e 100644 (file)
@@ -557,6 +557,7 @@ struct iscsi_conn {
 #define LOGIN_FLAGS_READ_ACTIVE                1
 #define LOGIN_FLAGS_CLOSED             2
 #define LOGIN_FLAGS_READY              4
+#define LOGIN_FLAGS_INITIAL_PDU                8
        unsigned long           login_flags;
        struct delayed_work     login_work;
        struct delayed_work     login_cleanup_work;
index 5e00b2333c26e3256a6f4dc51a2a58011a2c9db6..172dc8ee0e3bda95c39f7e037e7a33baf8012214 100644 (file)
@@ -86,6 +86,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
+       array->map.map_flags = attr->map_flags;
        array->elem_size = elem_size;
 
        if (!percpu)
index 39cfafd895b80d2f0fb5054626af001949a8ad98..b09185f0f17d3428ac445de2ed52756004ca5368 100644 (file)
@@ -432,6 +432,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
        trie->map.key_size = attr->key_size;
        trie->map.value_size = attr->value_size;
        trie->map.max_entries = attr->max_entries;
+       trie->map.map_flags = attr->map_flags;
        trie->data_size = attr->key_size -
                          offsetof(struct bpf_lpm_trie_key, data);
        trie->max_prefixlen = trie->data_size * 8;
index 4dfd6f2ec2f9725681f490971e60acc8033c64b6..31147d730abf532cc8f10efc9a871b5dd498928d 100644 (file)
@@ -88,6 +88,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
        smap->map.key_size = attr->key_size;
        smap->map.value_size = value_size;
        smap->map.max_entries = attr->max_entries;
+       smap->map.map_flags = attr->map_flags;
        smap->n_buckets = n_buckets;
        smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
index 1eddb713b815c3820dd996b4d34770e4c784ab71..339c8a1371de0201df0f7ac799280168ea4d22e3 100644 (file)
@@ -463,19 +463,22 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
        BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
 
+static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
+{
+       BUG_ON(regno >= MAX_BPF_REG);
+
+       memset(&regs[regno], 0, sizeof(regs[regno]));
+       regs[regno].type = NOT_INIT;
+       regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
+       regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+}
+
 static void init_reg_state(struct bpf_reg_state *regs)
 {
        int i;
 
-       for (i = 0; i < MAX_BPF_REG; i++) {
-               regs[i].type = NOT_INIT;
-               regs[i].imm = 0;
-               regs[i].min_value = BPF_REGISTER_MIN_RANGE;
-               regs[i].max_value = BPF_REGISTER_MAX_RANGE;
-               regs[i].min_align = 0;
-               regs[i].aux_off = 0;
-               regs[i].aux_off_align = 0;
-       }
+       for (i = 0; i < MAX_BPF_REG; i++)
+               mark_reg_not_init(regs, i);
 
        /* frame pointer */
        regs[BPF_REG_FP].type = FRAME_PTR;
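mark_reg_not_init() replaces the field-by-field reset with a memset followed by the few non-zero defaults, so the next field added to bpf_reg_state cannot be forgotten by a reset path. A standalone sketch of that reset idiom; the struct and constants here are invented:

#include <stdio.h>
#include <string.h>

struct reg_state {
        int type;
        long long min_value, max_value;
        int min_align, aux_off;         /* fields that grew over time */
};

#define TYPE_NOT_INIT 0
#define REG_MIN (-(1LL << 40))
#define REG_MAX (1LL << 40)

/* Zero everything first, then apply the non-zero defaults: a field
 * added to struct reg_state later is reset for free. */
static void mark_not_init(struct reg_state *regs, unsigned int regno)
{
        memset(&regs[regno], 0, sizeof(regs[regno]));
        regs[regno].type = TYPE_NOT_INIT;
        regs[regno].min_value = REG_MIN;
        regs[regno].max_value = REG_MAX;
}

int main(void)
{
        struct reg_state regs[4];

        for (unsigned int i = 0; i < 4; i++)
                mark_not_init(regs, i);
        printf("r0.min=%lld r0.aux_off=%d\n",
               regs[0].min_value, regs[0].aux_off);
        return 0;
}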
@@ -808,11 +811,15 @@ static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
                reg_off += reg->aux_off;
        }
 
-       /* skb->data is NET_IP_ALIGN-ed, but for strict alignment checking
-        * we force this to 2 which is universally what architectures use
-        * when they don't set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
+       /* For platforms that do not have a Kconfig enabling
+        * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
+        * NET_IP_ALIGN is universally set to '2'.  And on platforms
+        * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
+        * to this code only in strict mode where we want to emulate
+        * the NET_IP_ALIGN==2 checking.  Therefore use an
+        * unconditional IP align value of '2'.
         */
-       ip_align = strict ? 2 : NET_IP_ALIGN;
+       ip_align = 2;
        if ((ip_align + reg_off + off) % size != 0) {
                verbose("misaligned packet access off %d+%d+%d size %d\n",
                        ip_align, reg_off, off, size);
@@ -839,9 +846,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 {
        bool strict = env->strict_alignment;
 
-       if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
-               strict = true;
-
        switch (reg->type) {
        case PTR_TO_PACKET:
                return check_pkt_ptr_alignment(reg, off, size, strict);
@@ -1345,7 +1349,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
        struct bpf_verifier_state *state = &env->cur_state;
        const struct bpf_func_proto *fn = NULL;
        struct bpf_reg_state *regs = state->regs;
-       struct bpf_reg_state *reg;
        struct bpf_call_arg_meta meta;
        bool changes_data;
        int i, err;
@@ -1412,11 +1415,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
        }
 
        /* reset caller saved regs */
-       for (i = 0; i < CALLER_SAVED_REGS; i++) {
-               reg = regs + caller_saved[i];
-               reg->type = NOT_INIT;
-               reg->imm = 0;
-       }
+       for (i = 0; i < CALLER_SAVED_REGS; i++)
+               mark_reg_not_init(regs, caller_saved[i]);
 
        /* update return register */
        if (fn->ret_type == RET_INTEGER) {
@@ -2444,7 +2444,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 {
        struct bpf_reg_state *regs = env->cur_state.regs;
        u8 mode = BPF_MODE(insn->code);
-       struct bpf_reg_state *reg;
        int i, err;
 
        if (!may_access_skb(env->prog->type)) {
@@ -2477,11 +2476,8 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
        }
 
        /* reset caller saved regs to unreadable */
-       for (i = 0; i < CALLER_SAVED_REGS; i++) {
-               reg = regs + caller_saved[i];
-               reg->type = NOT_INIT;
-               reg->imm = 0;
-       }
+       for (i = 0; i < CALLER_SAVED_REGS; i++)
+               mark_reg_not_init(regs, caller_saved[i]);
 
        /* mark destination R0 register as readable, since it contains
         * the value fetched from the packet
@@ -2692,7 +2688,8 @@ err_free:
 /* the following conditions reduce the number of explored insns
  * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
  */
-static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
+static bool compare_ptrs_to_packet(struct bpf_verifier_env *env,
+                                  struct bpf_reg_state *old,
                                   struct bpf_reg_state *cur)
 {
        if (old->id != cur->id)
@@ -2735,7 +2732,7 @@ static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
         * 'if (R4 > data_end)' and all further insn were already good with r=20,
         * so they will be good with r=30 and we can prune the search.
         */
-       if (old->off <= cur->off &&
+       if (!env->strict_alignment && old->off <= cur->off &&
            old->off >= old->range && cur->off >= cur->range)
                return true;
 
@@ -2806,7 +2803,7 @@ static bool states_equal(struct bpf_verifier_env *env,
                        continue;
 
                if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
-                   compare_ptrs_to_packet(rold, rcur))
+                   compare_ptrs_to_packet(env, rold, rcur))
                        continue;
 
                return false;
@@ -3584,10 +3581,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
        } else {
                log_level = 0;
        }
-       if (attr->prog_flags & BPF_F_STRICT_ALIGNMENT)
+
+       env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
+       if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
                env->strict_alignment = true;
-       else
-               env->strict_alignment = false;
 
        ret = replace_map_fd_with_map_ptr(env);
        if (ret < 0)
@@ -3693,7 +3690,10 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
        mutex_lock(&bpf_verifier_lock);
 
        log_level = 0;
+
        env->strict_alignment = false;
+       if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+               env->strict_alignment = true;
 
        env->explored_states = kcalloc(env->prog->len,
                                       sizeof(struct bpf_verifier_state_list *),
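Both bpf_check() and bpf_analyzer() now compute strict_alignment the same way: start from the caller's flag, then force it on when the build lacks efficient unaligned access. A sketch of that derive-then-override pattern, with the Kconfig test stubbed out as a macro:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
 * set to 1 to model an x86-like platform. */
#define HAVE_EFFICIENT_UNALIGNED_ACCESS 0

#define F_STRICT_ALIGNMENT 0x1u

static bool effective_strict(unsigned int prog_flags)
{
        bool strict = !!(prog_flags & F_STRICT_ALIGNMENT);

        /* The platform capability overrides the user's choice: without
         * efficient unaligned access the checks must always be strict. */
        if (!HAVE_EFFICIENT_UNALIGNED_ACCESS)
                strict = true;
        return strict;
}

int main(void)
{
        printf("no flag:  strict=%d\n", effective_strict(0));
        printf("flag set: strict=%d\n", effective_strict(F_STRICT_ALIGNMENT));
        return 0;
}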
index c3c9a0e1b3c9a474bd80b8cb10ea1049284474b0..8d4e85eae42c08481899e415075ee42c6d12f90f 100644 (file)
@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css)
 {
        lockdep_assert_held(&cgroup_mutex);
 
+       if (css->flags & CSS_DYING)
+               return;
+
+       css->flags |= CSS_DYING;
+
        /*
         * This must happen before css is disassociated with its cgroup.
         * See seq_css() for details.
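kill_css() becomes idempotent: the first caller marks the css CSS_DYING and proceeds, and any repeated call under cgroup_mutex returns early. A minimal sketch of the guard, with the locking elided and the names invented:

#include <stdio.h>

#define FLAG_DYING 0x1u

struct obj {
        unsigned int flags;
        int kill_count;                 /* demo instrumentation only */
};

/* The flag makes teardown run at most once; in the kernel the check
 * and the set are serialized by cgroup_mutex. */
static void kill_obj(struct obj *o)
{
        if (o->flags & FLAG_DYING)
                return;
        o->flags |= FLAG_DYING;
        o->kill_count++;                /* real teardown goes here */
}

int main(void)
{
        struct obj o = { 0, 0 };

        kill_obj(&o);
        kill_obj(&o);                   /* second call is a no-op */
        printf("killed %d time(s)\n", o.kill_count);
        return 0;
}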
index f6501f4f6040b5a9c21e84aeb57e20906ef1c614..ae643412948added94f05d7efb120631f7798b44 100644 (file)
@@ -176,9 +176,9 @@ typedef enum {
 } cpuset_flagbits_t;
 
 /* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
 {
-       return test_bit(CS_ONLINE, &cs->flags);
+       return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
 }
 
 static inline int is_cpu_exclusive(const struct cpuset *cs)
index aa1076c5e4a9f3a5d9e6f58fef1c6f34e332de8c..e53770d2bf956bf5bf6ec1a42c75121552713da6 100644 (file)
@@ -1577,6 +1577,18 @@ static __latent_entropy struct task_struct *copy_process(
        if (!p)
                goto fork_out;
 
+       /*
+        * This _must_ happen before we call free_task(), i.e. before we jump
+        * to any of the bad_fork_* labels. This is to avoid freeing
+        * p->set_child_tid which is (ab)used as a kthread's data pointer for
+        * kernel threads (PF_KTHREAD).
+        */
+       p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+       /*
+        * Clear TID on mm_release()?
+        */
+       p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+
        ftrace_graph_init_task(p);
 
        rt_mutex_init_task(p);
@@ -1743,11 +1755,6 @@ static __latent_entropy struct task_struct *copy_process(
                }
        }
 
-       p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
-       /*
-        * Clear TID on mm_release()?
-        */
-       p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
 #ifdef CONFIG_BLOCK
        p->plug = NULL;
 #endif
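Moving the set_child_tid/clear_child_tid assignments to directly after the task allocation puts them in place before the first bad_fork_* jump, so free_task() never sees stale pointers. The general rule: initialize anything a cleanup path reads before the first goto that can reach that path. A compact sketch with invented names:

#include <stdio.h>
#include <stdlib.h>

struct task { void *data; int stage; };

static void free_task(struct task *t)
{
        /* The cleanup path reads ->data, so it must be valid (here:
         * NULL) no matter how early the caller bailed out. */
        free(t->data);
        free(t);
}

static struct task *copy_task(int fail_early)
{
        struct task *t = malloc(sizeof(*t));

        if (!t)
                return NULL;
        t->data = NULL;                 /* init before the first goto */
        t->stage = 0;

        if (fail_early)
                goto bad;               /* free_task() is already safe */
        t->data = malloc(32);
        t->stage = 1;
        return t;
bad:
        free_task(t);
        return NULL;
}

int main(void)
{
        struct task *t = copy_task(0);

        printf("stage=%d\n", t ? t->stage : -1);
        if (t)
                free_task(t);
        (void)copy_task(1);             /* early failure: no crash */
        return 0;
}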
index 2d2d3a568e4e8b3aeedb16711c90e660f698d6cf..adfe3b4cfe05a101bcee0da8e72db6fe6a10cf72 100644 (file)
@@ -122,7 +122,7 @@ static void *alloc_insn_page(void)
        return module_alloc(PAGE_SIZE);
 }
 
-static void free_insn_page(void *page)
+void __weak free_insn_page(void *page)
 {
        module_memfree(page);
 }
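Marking free_insn_page() __weak turns it into an overridable default: an architecture that needs special handling defines a strong function of the same name and the linker prefers it. A one-file sketch of weak linkage using the GCC/Clang attribute; the function name is made up:

#include <stdio.h>
#include <stdlib.h>

/* Weak default: any other object file may supply a plain (strong)
 * definition of free_page_buf() and the linker will pick that one,
 * with no #ifdef or function pointer involved. */
__attribute__((weak)) void free_page_buf(void *page)
{
        puts("generic free");
        free(page);
}

int main(void)
{
        free_page_buf(malloc(64));
        return 0;
}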
index 0450225579367eee10de6695679fffa7b963f013..ec4565122e6553f490dfb2434b6c14102b152bb2 100644 (file)
@@ -10,6 +10,7 @@ config LIVEPATCH
        depends on SYSFS
        depends on KALLSYMS_ALL
        depends on HAVE_LIVEPATCH
+       depends on !TRIM_UNUSED_KSYMS
        help
          Say Y here if you want to support kernel live patching.
          This option has no runtime impact until a kernel "patch"
index b9550941690915bc23323cd772e060cda7d3a704..28cd09e635ed669fe6c93b28eb1d59e46bbf4615 100644 (file)
@@ -1785,12 +1785,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
        int ret;
 
        raw_spin_lock_irq(&lock->wait_lock);
-
-       set_current_state(TASK_INTERRUPTIBLE);
-
        /* sleep on the mutex */
+       set_current_state(TASK_INTERRUPTIBLE);
        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
-
+       /*
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+        * have to fix that up.
+        */
+       fixup_rt_mutex_waiters(lock);
        raw_spin_unlock_irq(&lock->wait_lock);
 
        return ret;
@@ -1821,16 +1823,26 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
        bool cleanup = false;
 
        raw_spin_lock_irq(&lock->wait_lock);
+       /*
+        * Do an unconditional try-lock, this deals with the lock stealing
+        * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
+        * sets a NULL owner.
+        *
+        * We're not interested in the return value, because the subsequent
+        * test on rt_mutex_owner() will infer that. If the trylock succeeded,
+        * we will own the lock and it will have removed the waiter. If we
+        * failed the trylock, we're still not the owner and we need to remove
+        * ourselves.
+        */
+       try_to_take_rt_mutex(lock, current, waiter);
        /*
         * Unless we're the owner, we're still enqueued on the wait_list.
         * So check if we became owner, if not, take us off the wait_list.
         */
        if (rt_mutex_owner(lock) != current) {
                remove_waiter(lock, waiter);
-               fixup_rt_mutex_waiters(lock);
                cleanup = true;
        }
-
        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
index 78672d324a6ef95394ad72a0b0ba29c7d1155d5d..c7209f060eeb7c8672cf8f07ba9c83ac6c9460ac 100644 (file)
@@ -132,7 +132,7 @@ int freeze_processes(void)
        if (!pm_freezing)
                atomic_inc(&system_freezing_cnt);
 
-       pm_wakeup_clear(true);
+       pm_wakeup_clear();
        pr_info("Freezing user space processes ... ");
        pm_freezing = true;
        error = try_to_freeze_tasks(true);
index 3b1e0f3ad07fa69d9524bcb72d1c6d0bab1a9ad8..fa46606f33565613d2ef13a1e948f2bae0dfaa8b 100644 (file)
@@ -1425,7 +1425,7 @@ static unsigned int nr_meta_pages;
  * Numbers of normal and highmem page frames allocated for hibernation image
  * before suspending devices.
  */
-unsigned int alloc_normal, alloc_highmem;
+static unsigned int alloc_normal, alloc_highmem;
 /*
  * Memory bitmap used for marking saveable pages (during hibernation) or
  * hibernation image pages (during restore)
index c0248c74d6d4cef6dbf09f485f36686862c29094..15e6baef5c73f90b6817c0b1c4e871ea40e30318 100644 (file)
@@ -72,8 +72,6 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
-       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
-
        spin_lock_irq(&suspend_freeze_lock);
        if (pm_wakeup_pending())
                goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
  out:
        suspend_freeze_state = FREEZE_STATE_NONE;
        spin_unlock_irq(&suspend_freeze_lock);
-
-       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
-}
-
-static void s2idle_loop(void)
-{
-       do {
-               freeze_enter();
-
-               if (freeze_ops && freeze_ops->wake)
-                       freeze_ops->wake();
-
-               dpm_resume_noirq(PMSG_RESUME);
-               if (freeze_ops && freeze_ops->sync)
-                       freeze_ops->sync();
-
-               if (pm_wakeup_pending())
-                       break;
-
-               pm_wakeup_clear(false);
-       } while (!dpm_suspend_noirq(PMSG_SUSPEND));
 }
 
 void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
         * all the devices are suspended.
         */
        if (state == PM_SUSPEND_FREEZE) {
-               s2idle_loop();
-               goto Platform_early_resume;
+               trace_suspend_resume(TPS("machine_suspend"), state, true);
+               freeze_enter();
+               trace_suspend_resume(TPS("machine_suspend"), state, false);
+               goto Platform_wake;
        }
 
        error = disable_nonboot_cpus();
index a1aecf44ab07c70ab9f33d455646559313926344..a1db38abac5b750e8ce228b441b27900360665ec 100644 (file)
@@ -269,7 +269,6 @@ static struct console *exclusive_console;
 #define MAX_CMDLINECONSOLES 8
 
 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
-static int console_cmdline_cnt;
 
 static int preferred_console = -1;
 int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
         *      See if this tty is not yet registered, and
         *      if we have a slot free.
         */
-       for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) {
+       for (i = 0, c = console_cmdline;
+            i < MAX_CMDLINECONSOLES && c->name[0];
+            i++, c++) {
                if (strcmp(c->name, name) == 0 && c->index == idx) {
-                       if (brl_options)
-                               return 0;
-
-                       /*
-                        * Maintain an invariant that will help to find if
-                        * the matching console is preferred, see
-                        * register_console():
-                        *
-                        * The last non-braille console is always
-                        * the preferred one.
-                        */
-                       if (i != console_cmdline_cnt - 1)
-                               swap(console_cmdline[i],
-                                    console_cmdline[console_cmdline_cnt - 1]);
-
-                       preferred_console = console_cmdline_cnt - 1;
-
+                       if (!brl_options)
+                               preferred_console = i;
                        return 0;
                }
        }
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
        braille_set_options(c, brl_options);
 
        c->index = idx;
-       console_cmdline_cnt++;
        return 0;
 }
 /*
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
        }
 
        /*
-        * See if this console matches one we selected on the command line.
-        *
-        * There may be several entries in the console_cmdline array matching
-        * with the same console, one with newcon->match(), another by
-        * name/index:
-        *
-        *      pl011,mmio,0x87e024000000,115200 -- added from SPCR
-        *      ttyAMA0 -- added from command line
-        *
-        * Traverse the console_cmdline array in reverse order to be
-        * sure that if this console is preferred then it will be the first
-        * matching entry.  We use the invariant that is maintained in
-        * __add_preferred_console().
+        *      See if this console matches one we selected on
+        *      the command line.
         */
-       for (i = console_cmdline_cnt - 1; i >= 0; i--) {
-               c = console_cmdline + i;
-
+       for (i = 0, c = console_cmdline;
+            i < MAX_CMDLINECONSOLES && c->name[0];
+            i++, c++) {
                if (!newcon->match ||
                    newcon->match(newcon, c->name, c->index, c->options) != 0) {
                        /* default matching */
index 266ddcc1d8bbbc6af7bceda3657618beef2a9c59..60f356d91060c8974268cc5bc02d57c75d359afc 100644 (file)
@@ -60,19 +60,25 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
 }
 
 
+void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
+                  const struct cred *ptracer_cred)
+{
+       BUG_ON(!list_empty(&child->ptrace_entry));
+       list_add(&child->ptrace_entry, &new_parent->ptraced);
+       child->parent = new_parent;
+       child->ptracer_cred = get_cred(ptracer_cred);
+}
+
 /*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
  *
  * Must be called with the tasklist lock write-held.
  */
-void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 {
-       BUG_ON(!list_empty(&child->ptrace_entry));
-       list_add(&child->ptrace_entry, &new_parent->ptraced);
-       child->parent = new_parent;
        rcu_read_lock();
-       child->ptracer_cred = get_cred(__task_cred(new_parent));
+       __ptrace_link(child, new_parent, __task_cred(new_parent));
        rcu_read_unlock();
 }
 
@@ -386,7 +392,7 @@ static int ptrace_attach(struct task_struct *task, long request,
                flags |= PT_SEIZED;
        task->ptrace = flags;
 
-       __ptrace_link(task, current);
+       ptrace_link(task, current);
 
        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
@@ -459,7 +465,7 @@ static int ptrace_traceme(void)
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
-                       __ptrace_link(current, current->real_parent);
+                       ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);
index 76877a62b5fa374f0daa93f1180ce853ba32fc0c..622eed1b7658301a94a645c4426598b0096a8ab9 100644 (file)
@@ -245,11 +245,10 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
        sugov_update_commit(sg_policy, time, next_f);
 }
 
-static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 {
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
-       u64 last_freq_update_time = sg_policy->last_freq_update_time;
        unsigned long util = 0, max = 1;
        unsigned int j;
 
@@ -265,7 +264,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
                 * enough, don't take the CPU into account as it probably is
                 * idle now (and clear iowait_boost for it).
                 */
-               delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+               delta_ns = time - j_sg_cpu->last_update;
                if (delta_ns > TICK_NSEC) {
                        j_sg_cpu->iowait_boost = 0;
                        continue;
@@ -309,7 +308,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
                if (flags & SCHED_CPUFREQ_RT_DL)
                        next_f = sg_policy->policy->cpuinfo.max_freq;
                else
-                       next_f = sugov_next_freq_shared(sg_cpu);
+                       next_f = sugov_next_freq_shared(sg_cpu, time);
 
                sugov_update_commit(sg_policy, time, next_f);
        }
index 1370f067fb51511f26d578cb3954032952a62027..d2a1e6dd02913f4beb58b1d79be12d72761deec3 100644 (file)
@@ -825,8 +825,10 @@ static void check_thread_timers(struct task_struct *tsk,
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
-                       pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
-                               tsk->comm, task_pid_nr(tsk));
+                       if (print_fatal_signals) {
+                               pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
+                                       tsk->comm, task_pid_nr(tsk));
+                       }
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
@@ -838,8 +840,10 @@ static void check_thread_timers(struct task_struct *tsk,
                                soft += USEC_PER_SEC;
                                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
-                       pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
-                               tsk->comm, task_pid_nr(tsk));
+                       if (print_fatal_signals) {
+                               pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
+                                       tsk->comm, task_pid_nr(tsk));
+                       }
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
@@ -936,8 +940,10 @@ static void check_process_timers(struct task_struct *tsk,
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
-                       pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
-                               tsk->comm, task_pid_nr(tsk));
+                       if (print_fatal_signals) {
+                               pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
+                                       tsk->comm, task_pid_nr(tsk));
+                       }
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
@@ -945,8 +951,10 @@ static void check_process_timers(struct task_struct *tsk,
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
-                       pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
-                               tsk->comm, task_pid_nr(tsk));
+                       if (print_fatal_signals) {
+                               pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
+                                       tsk->comm, task_pid_nr(tsk));
+                       }
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (soft < hard) {
                                soft++;
index 74fdfe9ed3dba7fa659cb11feafac25919984bbb..9e5841dc14b5fa62e0c1470df704c3dc9b99f8ad 100644 (file)
@@ -5063,7 +5063,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
        }
 
  out:
-       kfree(fgd->new_hash);
+       free_ftrace_hash(fgd->new_hash);
        kfree(fgd);
 
        return ret;
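The ftrace fix swaps kfree() for free_ftrace_hash(): the hash owns further allocations, so only its dedicated destructor releases everything. A toy illustration of why the paired destructor matters; the types are invented:

#include <stdio.h>
#include <stdlib.h>

struct hash {
        int *buckets;                   /* owned by the hash */
        int nbuckets;
};

static struct hash *alloc_hash(int n)
{
        struct hash *h = malloc(sizeof(*h));

        if (!h)
                return NULL;
        h->buckets = calloc(n, sizeof(*h->buckets));
        h->nbuckets = n;
        return h;
}

/* The counterpart of alloc_hash(): frees the embedded allocation
 * too. A bare free(h) would leak h->buckets. */
static void free_hash(struct hash *h)
{
        if (!h)
                return;
        free(h->buckets);
        free(h);
}

int main(void)
{
        struct hash *h = alloc_hash(16);

        printf("buckets=%d\n", h ? h->nbuckets : 0);
        free_hash(h);
        return 0;
}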
index 889bc31785bee57901ae969f16a55390db8265c4..be88cbaadde3aeed809b2997e2ef683fba62dba4 100644 (file)
@@ -4504,6 +4504,44 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       {
+               "JMP_JSGE_K: Signed jump: value walk 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -3),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 6),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 1),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 1),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 1),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+                       BPF_EXIT_INSN(),                /* bad exit */
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),  /* good exit */
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSGE_K: Signed jump: value walk 2",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -3),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 2),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 2),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+                       BPF_EXIT_INSN(),                /* bad exit */
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),  /* good exit */
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JGT | BPF_K */
        {
                "JMP_JGT_K: if (3 > 2) return 1",
index d9e6fddcc51f06a1286c56a24c510c1a3efa8add..b3c7214d710d5ea8bab8648b5182c53d882f3c31 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -407,12 +407,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 
        ret = handle_mm_fault(vma, address, fault_flags);
        if (ret & VM_FAULT_ERROR) {
-               if (ret & VM_FAULT_OOM)
-                       return -ENOMEM;
-               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-                       return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-                       return -EFAULT;
+               int err = vm_fault_to_errno(ret, *flags);
+
+               if (err)
+                       return err;
                BUG();
        }
 
@@ -723,12 +721,10 @@ retry:
        ret = handle_mm_fault(vma, address, fault_flags);
        major |= ret & VM_FAULT_MAJOR;
        if (ret & VM_FAULT_ERROR) {
-               if (ret & VM_FAULT_OOM)
-                       return -ENOMEM;
-               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-                       return -EHWPOISON;
-               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-                       return -EFAULT;
+               int err = vm_fault_to_errno(ret, 0);
+
+               if (err)
+                       return err;
                BUG();
        }
 
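Both call sites now funnel the VM_FAULT_* error bits through a single vm_fault_to_errno() helper instead of open-coding the mapping. A self-contained sketch of the flags-to-errno shape; the bit values are invented, and since userspace errno.h has no EHWPOISON, -EIO stands in for it here:

#include <errno.h>
#include <stdio.h>

#define FAULT_OOM      0x1u
#define FAULT_HWPOISON 0x2u
#define FAULT_SIGBUS   0x4u
#define FAULT_SIGSEGV  0x8u

#define FOLL_HWPOISON  0x1u

/* One translation point from fault bits to an errno; returning 0
 * means "not an error this helper knows", so the caller can BUG(). */
static int fault_to_errno(unsigned int ret, unsigned int foll_flags)
{
        if (ret & FAULT_OOM)
                return -ENOMEM;
        if (ret & FAULT_HWPOISON)
                return (foll_flags & FOLL_HWPOISON) ? -EIO : -EFAULT;
        if (ret & (FAULT_SIGBUS | FAULT_SIGSEGV))
                return -EFAULT;
        return 0;
}

int main(void)
{
        printf("%d\n", fault_to_errno(FAULT_OOM, 0));       /* -ENOMEM */
        printf("%d\n", fault_to_errno(FAULT_SIGSEGV, 0));   /* -EFAULT */
        printf("%d\n", fault_to_errno(0, 0));               /* 0 */
        return 0;
}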
index e5828875f7bbd7a770d5c23334a0e3994ffe544f..3eedb187e5496f36f7f3186267f475254bcda5ab 100644 (file)
@@ -4170,6 +4170,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        }
                        ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
                        if (ret & VM_FAULT_ERROR) {
+                               int err = vm_fault_to_errno(ret, flags);
+
+                               if (err)
+                                       return err;
+
                                remainder = 0;
                                break;
                        }
index d9fc0e4561283d9a351f6dd7c4cfb74ad26ab566..216184af0e192b5405efc5a594129fa7c53ae953 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1028,8 +1028,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
                goto out;
 
        if (PageTransCompound(page)) {
-               err = split_huge_page(page);
-               if (err)
+               if (split_huge_page(page))
                        goto out_unlock;
        }
 
index b049c9b2dba8718a6f57777591c2f2753d8d40ad..7b8a5db76a2fec7331f09048f3c19ebdfddf9f16 100644 (file)
@@ -1739,6 +1739,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
        }
 }
 
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+       struct memblock_region *rgn;
+       unsigned long size = 0;
+       int idx;
+
+       for_each_memblock_type((&memblock.reserved), rgn) {
+               phys_addr_t start, end;
+
+               if (rgn->base + rgn->size < start_addr)
+                       continue;
+               if (rgn->base > end_addr)
+                       continue;
+
+               start = rgn->base;
+               end = start + rgn->size;
+               size += end - start;
+       }
+
+       return size;
+}
+
 void __init_memblock __memblock_dump_all(void)
 {
        pr_info("MEMBLOCK configuration:\n");
index 2527dfeddb003d245ac2e2bd964134030426f777..342fac9ba89b0da3e207b1fdaef2be71c9837a24 100644 (file)
@@ -1595,12 +1595,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
        if (ret) {
                pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
                        pfn, ret, page->flags, &page->flags);
-               /*
-                * We know that soft_offline_huge_page() tries to migrate
-                * only one hugepage pointed to by hpage, so we need not
-                * run through the pagelist here.
-                */
-               putback_active_hugepage(hpage);
+               if (!list_empty(&pagelist))
+                       putback_movable_pages(&pagelist);
                if (ret > 0)
                        ret = -EIO;
        } else {
index 6ff5d729ded0ecd3a5607d10248697a786091f7e..2e65df1831d941dcd1282c56312bdbd153df0a79 100644 (file)
@@ -3029,6 +3029,17 @@ static int __do_fault(struct vm_fault *vmf)
        return ret;
 }
 
+/*
+ * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
+ * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+       return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
 static int pte_alloc_one_map(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
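pmd_devmap_trans_unstable() exists purely to pin down evaluation order: the silent pmd_devmap() test must run before pmd_trans_unstable(), whose pmd_clear_bad() path would otherwise spam dmesg for _PAGE_DEVMAP pmds. Short-circuit || encodes the order once. An illustrative sketch in which the two checks are fakes:

#include <stdbool.h>
#include <stdio.h>

static bool is_devmap(int val)
{
        return val == 2;                        /* quiet check */
}

static bool is_unstable(int val)
{
        if (val == 2)
                printf("bogus warning for %d\n", val);  /* noisy check */
        return val != 0;
}

/* The helper pins down the required order, so no call site can
 * accidentally swap the operands and hit the noisy path. */
static bool devmap_or_unstable(int val)
{
        return is_devmap(val) || is_unstable(val);
}

int main(void)
{
        printf("%d\n", devmap_or_unstable(2));  /* true, no warning */
        printf("%d\n", devmap_or_unstable(1));  /* true, second check */
        return 0;
}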
@@ -3052,18 +3063,27 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
 map_pte:
        /*
         * If a huge pmd materialized under us just retry later.  Use
-        * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
-        * didn't become pmd_trans_huge under us and then back to pmd_none, as
-        * a result of MADV_DONTNEED running immediately after a huge pmd fault
-        * in a different thread of this mm, in turn leading to a misleading
-        * pmd_trans_huge() retval.  All we have to ensure is that it is a
-        * regular pmd that we can walk with pte_offset_map() and we can do that
-        * through an atomic read in C, which is what pmd_trans_unstable()
-        * provides.
+        * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
+        * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
+        * under us and then back to pmd_none, as a result of MADV_DONTNEED
+        * running immediately after a huge pmd fault in a different thread of
+        * this mm, in turn leading to a misleading pmd_trans_huge() retval.
+        * All we have to ensure is that it is a regular pmd that we can walk
+        * with pte_offset_map() and we can do that through an atomic read in
+        * C, which is what pmd_trans_unstable() provides.
         */
-       if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+       if (pmd_devmap_trans_unstable(vmf->pmd))
                return VM_FAULT_NOPAGE;
 
+       /*
+        * At this point we know that our vmf->pmd points to a page of ptes
+        * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
+        * for the duration of the fault.  If a racing MADV_DONTNEED runs and
+        * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
+        * be valid and we will re-check to make sure the vmf->pte isn't
+        * pte_none() under vmf->ptl protection when we return to
+        * alloc_set_pte().
+        */
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                        &vmf->ptl);
        return 0;
@@ -3690,7 +3710,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
                vmf->pte = NULL;
        } else {
                /* See comment in pte_alloc_one_map() */
-               if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+               if (pmd_devmap_trans_unstable(vmf->pmd))
                        return 0;
                /*
                 * A regular pmd is established and it can't morph into a huge
index c483c5c20b4bd12bcca50972c9f74a0dbd3a713e..b562b5523a6544e6c0ae6e4f792943441f6217a3 100644 (file)
@@ -284,7 +284,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
        int i;
        int nr = pagevec_count(pvec);
-       int delta_munlocked;
+       int delta_munlocked = -nr;
        struct pagevec pvec_putback;
        int pgrescued = 0;
 
@@ -304,6 +304,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                                continue;
                        else
                                __munlock_isolation_failed(page);
+               } else {
+                       delta_munlocked++;
                }
 
                /*
@@ -315,7 +317,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                pagevec_add(&pvec_putback, pvec->pages[i]);
                pvec->pages[i] = NULL;
        }
-       delta_munlocked = -nr + pagevec_count(&pvec_putback);
        __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
        spin_unlock_irq(zone_lru_lock(zone));
 
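The mlock fix initializes delta_munlocked to -nr and increments it back for every page that was not actually munlocked, instead of deriving the delta from the putback vector afterwards, which missed pages freed in the interim. A toy version of the count-by-correction pattern:

#include <stdio.h>

/* Start from "all n were released" and add one back for each item
 * that needed no release; items that vanish mid-loop still count. */
static int delta_released(const int *locked, int n)
{
        int delta = -n;

        for (int i = 0; i < n; i++)
                if (!locked[i])
                        delta++;        /* was not locked: no change */
        return delta;
}

int main(void)
{
        int locked[] = { 1, 0, 1, 1 };

        printf("delta = %d\n", delta_released(locked, 4));   /* -3 */
        return 0;
}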
index f9e450c6b6e414d61b00d5a61be9cdea3b773e1b..2302f250d6b1ba150e3c2e4e17cfb6c99574ab5b 100644 (file)
@@ -292,6 +292,26 @@ int page_group_by_mobility_disabled __read_mostly;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+       unsigned long max_initialise;
+       unsigned long reserved_lowmem;
+
+       /*
+        * Initialise at least 2G of a node, but also take into account
+        * two large system hashes that can take up 1GB for 0.25TB/node.
+        */
+       max_initialise = max(2UL << (30 - PAGE_SHIFT),
+               (pgdat->node_spanned_pages >> 8));
+
+       /*
+        * Compensate for all the memblock reservations (e.g. crash kernel)
+        * in the initial estimate to make sure we will initialize enough
+        * memory to boot.
+        */
+       reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+                       pgdat->node_start_pfn + max_initialise);
+       max_initialise += reserved_lowmem;
+
+       pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
        pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -314,20 +334,11 @@ static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
 {
-       unsigned long max_initialise;
-
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
-       /*
-        * Initialise at least 2G of a node but also take into account that
-        * two large system hashes that can take up 1GB for 0.25TB/node.
-        */
-       max_initialise = max(2UL << (30 - PAGE_SHIFT),
-               (pgdat->node_spanned_pages >> 8));
-
        (*nr_initialised)++;
-       if ((*nr_initialised > max_initialise) &&
+       if ((*nr_initialised > pgdat->static_init_size) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                pgdat->first_deferred_pfn = pfn;
                return false;
@@ -3870,7 +3881,9 @@ retry:
                goto got_pg;
 
        /* Avoid allocations with no watermarks from looping endlessly */
-       if (test_thread_flag(TIF_MEMDIE))
+       if (test_thread_flag(TIF_MEMDIE) &&
+           (alloc_flags == ALLOC_NO_WATERMARKS ||
+            (gfp_mask & __GFP_NOMEMALLOC)))
                goto nopage;
 
        /* Retry as long as the OOM killer is making progress */
@@ -6136,7 +6149,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        /* pg_data_t should be reset to zero when it's allocated */
        WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
 
-       reset_deferred_meminit(pgdat);
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
        pgdat->per_cpu_nodestats = NULL;
@@ -6158,6 +6170,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
                (unsigned long)pgdat->node_mem_map);
 #endif
 
+       reset_deferred_meminit(pgdat);
        free_area_init_core(pgdat);
 }
 
index 57e5156f02be6bcc23e70ec801e9cc1c3bbdd631..7449593fca724147cef5b8f7a46752333e5e0585 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5512,6 +5512,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                char mbuf[64];
                char *buf;
                struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+               ssize_t len;
 
                if (!attr || !attr->store || !attr->show)
                        continue;
@@ -5536,8 +5537,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                        buf = buffer;
                }
 
-               attr->show(root_cache, buf);
-               attr->store(s, buf, strlen(buf));
+               len = attr->show(root_cache, buf);
+               if (len > 0)
+                       attr->store(s, buf, len);
        }
 
        if (buffer)
index 464df34899031d46058b7cbadc1c3be24ae8dc89..26be6407abd7efe452a585d341a8d7e5b53d2b32 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -357,8 +357,11 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
        WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
 
        /*
-        * Make sure that larger requests are not too disruptive - no OOM
-        * killer and no allocation failure warnings as we have a fallback
+        * We want to attempt a large physically contiguous block first because
+        * it is less likely to fragment multiple larger blocks and therefore
+        * contributes less to long-term fragmentation than the vmalloc fallback.
+        * However, make sure that larger requests are not too disruptive - no
+        * OOM killer and no allocation failure warnings as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;
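kvmalloc_node() tries the physically contiguous path first, with flags that suppress warnings and the OOM killer, and only then falls back to vmalloc. A userspace analog of that try-fast-then-fall-back shape, in which both allocators are stand-ins:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the cheap contiguous path; returns NULL above 1 MiB
 * to model kmalloc() failing under fragmentation. */
static void *fast_alloc(size_t size)
{
        return size > (1u << 20) ? NULL : malloc(size);
}

/* Stand-in for the vmalloc fallback that stitches pages together. */
static void *slow_alloc(size_t size)
{
        return malloc(size);
}

static void *kv_alloc(size_t size)
{
        /* Try the contiguous block first: cheaper to use, and when it
         * succeeds it avoids carving up larger free blocks. */
        void *p = fast_alloc(size);

        return p ? p : slow_alloc(size);
}

int main(void)
{
        void *p = kv_alloc(4u << 20);   /* forces the fallback here */

        printf("got %p\n", p);
        free(p);
        return 0;
}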
index 574f78824d8a2ae53751bbe1849e53502bc575be..32bd3ead9ba14a0b42bda3fc959f9134a8c9cc36 100644 (file)
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
                err = 0;
                switch (nla_type(attr)) {
                case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
-                       if (!(p->flags & BR_VLAN_TUNNEL))
+                       if (!p || !(p->flags & BR_VLAN_TUNNEL))
                                return -EINVAL;
                        err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
                        if (err)
index 08341d2aa9c946d7bdd6e0d599e31ba96557a290..6f12a5271219f071ed2d0bacb263561cb9e0f605 100644 (file)
@@ -179,6 +179,8 @@ static void br_stp_start(struct net_bridge *br)
                br_debug(br, "using kernel STP\n");
 
                /* To start timers on any ports left in blocking */
+               if (br->dev->flags & IFF_UP)
+                       mod_timer(&br->hello_timer, jiffies + br->hello_time);
                br_port_state_selection(br);
        }
 
index c98b3e5c140a5f30a28fd748408cf5e949a032b6..60b6fe277a8b0c90faad0dfcda87f149ceaa5552 100644 (file)
@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg)
        if (br->dev->flags & IFF_UP) {
                br_config_bpdu_generation(br);
 
-               if (br->stp_enabled != BR_USER_STP)
+               if (br->stp_enabled == BR_KERNEL_STP)
                        mod_timer(&br->hello_timer,
                                  round_jiffies(jiffies + br->hello_time));
        }
index 5929309beaa1d5d310a19029364693c0b0355772..db85230e49c3b7e97093d54a1d7625d9145dfeb4 100644 (file)
@@ -68,6 +68,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
        if (e->ethproto != htons(ETH_P_ARP) ||
            e->invflags & EBT_IPROTO)
                return -EINVAL;
+       if (ebt_invalid_target(info->target))
+               return -EINVAL;
+
        return 0;
 }
 
index 9ec0c9f908fa712b18bfa25c87aa7bce12cee786..9c6e619f452bc96770a8e340d3879adce6dadd2a 100644 (file)
@@ -1373,7 +1373,8 @@ static inline int ebt_obj_to_user(char __user *um, const char *_name,
        strlcpy(name, _name, sizeof(name));
        if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) ||
            put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) ||
-           xt_data_to_user(um + entrysize, data, usersize, datasize))
+           xt_data_to_user(um + entrysize, data, usersize, datasize,
+                           XT_ALIGN(datasize)))
                return -EFAULT;
 
        return 0;
@@ -1658,7 +1659,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
                if (match->compat_to_user(cm->data, m->data))
                        return -EFAULT;
        } else {
-               if (xt_data_to_user(cm->data, m->data, match->usersize, msize))
+               if (xt_data_to_user(cm->data, m->data, match->usersize, msize,
+                                   COMPAT_XT_ALIGN(msize)))
                        return -EFAULT;
        }
 
@@ -1687,7 +1689,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
                if (target->compat_to_user(cm->data, t->data))
                        return -EFAULT;
        } else {
-               if (xt_data_to_user(cm->data, t->data, target->usersize, tsize))
+               if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
+                                   COMPAT_XT_ALIGN(tsize)))
                        return -EFAULT;
        }
 
index 2034fb9266700e6b456b141db9b5d264aed77d62..8757fb87dab871faaaeccaed0ac11d76521ad594 100644 (file)
@@ -151,7 +151,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
        struct timespec validity;
        void *tp, *tpend;
        void **ptp;
-       struct ceph_crypto_key new_session_key;
+       struct ceph_crypto_key new_session_key = { 0 };
        struct ceph_buffer *new_ticket_blob;
        unsigned long new_expires, new_renew_after;
        u64 new_secret_id;
@@ -215,6 +215,9 @@ static int process_one_ticket(struct ceph_auth_client *ac,
        dout(" ticket blob is %d bytes\n", dlen);
        ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
        blob_struct_v = ceph_decode_8(ptp);
+       if (blob_struct_v != 1)
+               goto bad;
+
        new_secret_id = ceph_decode_64(ptp);
        ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
        if (ret)
@@ -234,13 +237,13 @@ static int process_one_ticket(struct ceph_auth_client *ac,
             type, ceph_entity_type_name(type), th->secret_id,
             (int)th->ticket_blob->vec.iov_len);
        xi->have_keys |= th->service;
-
-out:
-       return ret;
+       return 0;
 
 bad:
        ret = -EINVAL;
-       goto out;
+out:
+       ceph_crypto_key_destroy(&new_session_key);
+       return ret;
 }
 
 static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
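The ceph fix zero-initializes new_session_key and routes every failure through one label that destroys it, which is safe precisely because the destructor tolerates the zeroed state. A sketch of that goto-cleanup shape; the key type is hypothetical, and free(NULL) being a no-op is what makes the early bail-outs safe:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct key { void *buf; size_t len; };

/* Safe on a zero-initialized key: free(NULL) does nothing. */
static void key_destroy(struct key *k)
{
        free(k->buf);
        memset(k, 0, sizeof(*k));
}

static int process_ticket(int fail_early)
{
        struct key new_key = { 0 };     /* destroy is safe from here on */
        int ret;

        if (fail_early) {
                ret = -1;
                goto bad;               /* key never filled: still fine */
        }
        new_key.buf = malloc(32);
        new_key.len = 32;
        /* ... decode into the key, hand it off on success ... */
        key_destroy(&new_key);          /* demo: nothing kept it */
        return 0;
bad:
        key_destroy(&new_key);
        return ret;
}

int main(void)
{
        printf("%d %d\n", process_ticket(1), process_ticket(0));
        return 0;
}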
index 4fd02831beed20fb3d98475c7da2a0b4f2818513..47e94b560ba0d37daf2fb801fa11b9aa39720013 100644 (file)
@@ -56,19 +56,6 @@ static const struct kernel_param_ops param_ops_supported_features = {
 module_param_cb(supported_features, &param_ops_supported_features, NULL,
                S_IRUGO);
 
-/*
- * find filename portion of a path (/foo/bar/baz -> baz)
- */
-const char *ceph_file_part(const char *s, int len)
-{
-       const char *e = s + len;
-
-       while (e != s && *(e-1) != '/')
-               e--;
-       return e;
-}
-EXPORT_SYMBOL(ceph_file_part);
-
 const char *ceph_msg_type_name(int type)
 {
        switch (type) {
index 5766a6c896c4fa7290fffdbee239701ca4e2eeef..588a919300514ad2cd4b843c528bb41e601fa7d8 100644 (file)
@@ -1174,8 +1174,8 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
  * Returns true if the result moves the cursor on to the next piece
  * of the data item.
  */
-static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
-                               size_t bytes)
+static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
+                                 size_t bytes)
 {
        bool new_piece;
 
@@ -1207,8 +1207,6 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
                new_piece = true;
        }
        cursor->need_crc = new_piece;
-
-       return new_piece;
 }
 
 static size_t sizeof_footer(struct ceph_connection *con)
@@ -1577,7 +1575,6 @@ static int write_partial_message_data(struct ceph_connection *con)
                size_t page_offset;
                size_t length;
                bool last_piece;
-               bool need_crc;
                int ret;
 
                page = ceph_msg_data_next(cursor, &page_offset, &length,
@@ -1592,7 +1589,7 @@ static int write_partial_message_data(struct ceph_connection *con)
                }
                if (do_datacrc && cursor->need_crc)
                        crc = ceph_crc32c_page(crc, page, page_offset, length);
-               need_crc = ceph_msg_data_advance(cursor, (size_t)ret);
+               ceph_msg_data_advance(cursor, (size_t)ret);
        }
 
        dout("%s %p msg %p done\n", __func__, con, msg);
@@ -2231,10 +2228,18 @@ static void process_ack(struct ceph_connection *con)
        struct ceph_msg *m;
        u64 ack = le64_to_cpu(con->in_temp_ack);
        u64 seq;
+       bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
+       struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;
 
-       while (!list_empty(&con->out_sent)) {
-               m = list_first_entry(&con->out_sent, struct ceph_msg,
-                                    list_head);
+       /*
+        * In the reconnect case, con_fault() has requeued messages
+        * in out_sent. We should clean up old messages according to
+        * the reconnect seq.
+        */
+       while (!list_empty(list)) {
+               m = list_first_entry(list, struct ceph_msg, list_head);
+               if (reconnect && m->needs_out_seq)
+                       break;
                seq = le64_to_cpu(m->hdr.seq);
                if (seq > ack)
                        break;
@@ -2243,6 +2248,7 @@ static void process_ack(struct ceph_connection *con)
                m->ack_stamp = jiffies;
                ceph_msg_remove(m);
        }
+
        prepare_read_tag(con);
 }
 
@@ -2299,7 +2305,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
 
                if (do_datacrc)
                        crc = ceph_crc32c_page(crc, page, page_offset, ret);
-               (void) ceph_msg_data_advance(cursor, (size_t)ret);
+               ceph_msg_data_advance(cursor, (size_t)ret);
        }
        if (do_datacrc)
                con->in_data_crc = crc;
index 29a0ef351c5e068838fc1ed8f634439853ff0a3a..250f11f786092d902b52da5af9a228d9786acf62 100644 (file)
@@ -43,15 +43,13 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
        int i, err = -EINVAL;
        struct ceph_fsid fsid;
        u32 epoch, num_mon;
-       u16 version;
        u32 len;
 
        ceph_decode_32_safe(&p, end, len, bad);
        ceph_decode_need(&p, end, len, bad);
 
        dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
-
-       ceph_decode_16_safe(&p, end, version, bad);
+       p += sizeof(u16);  /* skip version */
 
        ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
index ffe9e904d4d1d130b0353edbe45d50d236b4f74e..55e3a477f92d4cba92e342d13686b6e3b94204c0 100644 (file)
@@ -317,6 +317,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
                u32 yes;
                struct crush_rule *r;
 
+               err = -EINVAL;
                ceph_decode_32_safe(p, end, yes, bad);
                if (!yes) {
                        dout("crush_decode NO rule %d off %x %p to %p\n",
index b0b87a292e7ccac2221a2425510b3c28e1df97f7..a0adfc31a3fe854cd52600f6b2924255aee521c2 100644 (file)
@@ -1680,8 +1680,10 @@ start_again:
 
        hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
                          &devlink_nl_family, NLM_F_MULTI, cmd);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (devlink_nl_put_handle(skb, devlink))
                goto nla_put_failure;
@@ -2098,8 +2100,10 @@ start_again:
 
        hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
                          &devlink_nl_family, NLM_F_MULTI, cmd);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (devlink_nl_put_handle(skb, devlink))
                goto nla_put_failure;
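
Both devlink hunks close the same leak: the dump path allocates its own skb, and when genlmsg_put() cannot place the header the function used to return -EMSGSIZE with that skb still allocated. The ownership rule in isolation, as a standalone sketch where alloc_msg() and put_header() are stand-ins rather than the netlink API:

    #include <errno.h>
    #include <stdlib.h>

    struct msg { int used; };

    static struct msg *alloc_msg(void) { return calloc(1, sizeof(struct msg)); }
    static int put_header(struct msg *m) { return m->used ? -1 : 0; }

    /* Whoever allocates the buffer must free it on every early exit. */
    static int dump_one(void)
    {
            struct msg *m = alloc_msg();

            if (!m)
                    return -ENOMEM;
            if (put_header(m) < 0) {
                    free(m);                /* leaked before the fix */
                    return -EMSGSIZE;
            }
            /* ... fill attributes and send ... */
            free(m);
            return 0;
    }
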
index 960e503b5a529a2c4f1866f49c150493ee98d7da..6192f11beec9077de964e2aeff4f78547f08b8da 100644 (file)
@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard_out);
 
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
        /* This initializer is needed to force the linker to place this variable
         * into the const section. Otherwise it might end up in the bss section.
         * We really want to avoid false sharing on this variable, and catch
         * any writes on it.
         */
-       [RTAX_MAX] = 0xdeadbeef,
+       .refcnt = ATOMIC_INIT(1),
 };
 
 void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
        if (dev)
                dev_hold(dev);
        dst->ops = ops;
-       dst_init_metrics(dst, dst_default_metrics, true);
+       dst_init_metrics(dst, dst_default_metrics.metrics, true);
        dst->expires = 0UL;
        dst->path = dst;
        dst->from = NULL;
@@ -314,25 +314,30 @@ EXPORT_SYMBOL(dst_release);
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
-       u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+       struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
 
        if (p) {
-               u32 *old_p = __DST_METRICS_PTR(old);
+               struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
                unsigned long prev, new;
 
-               memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+               atomic_set(&p->refcnt, 1);
+               memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
 
                new = (unsigned long) p;
                prev = cmpxchg(&dst->_metrics, old, new);
 
                if (prev != old) {
                        kfree(p);
-                       p = __DST_METRICS_PTR(prev);
+                       p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
                        if (prev & DST_METRICS_READ_ONLY)
                                p = NULL;
+               } else if (prev & DST_METRICS_REFCOUNTED) {
+                       if (atomic_dec_and_test(&old_p->refcnt))
+                               kfree(old_p);
                }
        }
-       return p;
+       BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+       return (u32 *)p;
 }
 EXPORT_SYMBOL(dst_cow_metrics_generic);
 
@@ -341,7 +346,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
        unsigned long prev, new;
 
-       new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+       new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
        prev = cmpxchg(&dst->_metrics, old, new);
        if (prev == old)
                kfree(__DST_METRICS_PTR(old));
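
These dst.c changes, together with the fib_semantics.c and route.c hunks further down, wrap the shared metrics array in a refcounted structure so a fib_info can be released while cached routes still reference its metrics. The BUILD_BUG_ON above pins the array at offset 0, which lets a pointer to the struct double as a pointer to the array. A userspace model of the layout and lifetime rule, with illustrative names:

    #include <stdatomic.h>
    #include <stdlib.h>

    #define RTAX_MAX 16                     /* illustrative size */

    struct dst_metrics_model {
            unsigned int metrics[RTAX_MAX]; /* must remain at offset 0 */
            atomic_int refcnt;
    };

    static struct dst_metrics_model default_metrics = { .refcnt = 1 };

    static struct dst_metrics_model *metrics_get(struct dst_metrics_model *m)
    {
            if (m != &default_metrics)
                    atomic_fetch_add(&m->refcnt, 1);
            return m;
    }

    static void metrics_put(struct dst_metrics_model *m)
    {
            /* the static default block is shared and never freed */
            if (m != &default_metrics &&
                atomic_fetch_sub(&m->refcnt, 1) == 1)
                    free(m);
    }
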
index a253a6197e6b37a7ae2fe451c646b01c861a3e22..a6bb95fa87b26a6627ba38cd9a94269c7e59f73d 100644 (file)
@@ -2281,6 +2281,7 @@ bool bpf_helper_changes_pkt_data(void *func)
            func == bpf_skb_change_head ||
            func == bpf_skb_change_tail ||
            func == bpf_skb_pull_data ||
+           func == bpf_clone_redirect ||
            func == bpf_l3_csum_replace ||
            func == bpf_l4_csum_replace ||
            func == bpf_xdp_adjust_head)
index 1934efd4a9d4986f3497abc40968fd08c91896b3..26bbfababff27cecc589bca69e3eb14739187f5b 100644 (file)
@@ -315,6 +315,25 @@ out_undo:
        goto out;
 }
 
+static int __net_init net_defaults_init_net(struct net *net)
+{
+       net->core.sysctl_somaxconn = SOMAXCONN;
+       return 0;
+}
+
+static struct pernet_operations net_defaults_ops = {
+       .init = net_defaults_init_net,
+};
+
+static __init int net_defaults_init(void)
+{
+       if (register_pernet_subsys(&net_defaults_ops))
+               panic("Cannot initialize net default settings");
+
+       return 0;
+}
+
+core_initcall(net_defaults_init);
 
 #ifdef CONFIG_NET_NS
 static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
index 49a279a7cc15b0d2409236f53a0629ccc927e07c..9e2c0a7cb3256e8cb2af1d65aaf293db96b9a418 100644 (file)
@@ -3231,8 +3231,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int err = 0;
        int fidx = 0;
 
-       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
-                       IFLA_MAX, ifla_policy, NULL) == 0) {
+       err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
+                         IFLA_MAX, ifla_policy, NULL);
+       if (err < 0) {
+               return -EINVAL;
+       } else if (err == 0) {
                if (tb[IFLA_MASTER])
                        br_idx = nla_get_u32(tb[IFLA_MASTER]);
        }
index 346d3e85dfbc2eca1ded0442ecb78d31e1768523..b1be7c01efe269d2bc97be7dcd1cc5485d29fa7b 100644 (file)
@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 
        spin_lock_irqsave(&q->lock, flags);
        skb = __skb_dequeue(q);
-       if (skb && (skb_next = skb_peek(q)))
+       if (skb && (skb_next = skb_peek(q))) {
                icmp_next = is_icmp_err_skb(skb_next);
+               if (icmp_next)
+                       sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+       }
        spin_unlock_irqrestore(&q->lock, flags);
 
        if (is_icmp_err_skb(skb) && !icmp_next)
index ea23254b2457cf15eeae495130dab437090f8e2e..b7cd9aafe99e9b9216b13cce617133cdf832eeeb 100644 (file)
@@ -479,8 +479,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
 {
        struct ctl_table *tbl;
 
-       net->core.sysctl_somaxconn = SOMAXCONN;
-
        tbl = netns_core_table;
        if (!net_eq(net, &init_net)) {
                tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
index 26130ae438da53f3f99a4e9fa712a572f40ca779..90038d45a54764df55017b46258dc772b0af1c96 100644 (file)
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds)
+{
+       int i, ret = 0;
+
+       /* Suspend slave network devices */
+       for (i = 0; i < ds->num_ports; i++) {
+               if (!dsa_is_port_initialized(ds, i))
+                       continue;
+
+               ret = dsa_slave_suspend(ds->ports[i].netdev);
+               if (ret)
+                       return ret;
+       }
+
+       if (ds->ops->suspend)
+               ret = ds->ops->suspend(ds);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
+
+int dsa_switch_resume(struct dsa_switch *ds)
+{
+       int i, ret = 0;
+
+       if (ds->ops->resume)
+               ret = ds->ops->resume(ds);
+
+       if (ret)
+               return ret;
+
+       /* Resume slave network devices */
+       for (i = 0; i < ds->num_ports; i++) {
+               if (!dsa_is_port_initialized(ds, i))
+                       continue;
+
+               ret = dsa_slave_resume(ds->ports[i].netdev);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
+#endif
+
 static struct packet_type dsa_pack_type __read_mostly = {
        .type   = cpu_to_be16(ETH_P_XDSA),
        .func   = dsa_switch_rcv,
index 033b3bfb63dc1887b15b3e08f00a00f70b706ec4..7796580e99ee2c57cdd5503130c8ec2e121d879a 100644 (file)
@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
                dsa_ds_unapply(dst, ds);
        }
 
-       if (dst->cpu_switch)
+       if (dst->cpu_switch) {
                dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+               dst->cpu_switch = NULL;
+       }
 
        pr_info("DSA: tree %d unapplied\n", dst->tree);
        dst->applied = false;
index ad345c8b0b0693cc214b212ccbb402859d910134..7281098df04ecd597b7824a9c0d2ec1d7f60e2b9 100644 (file)
@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
        dsa_switch_unregister_notifier(ds);
 }
 
-#ifdef CONFIG_PM_SLEEP
-int dsa_switch_suspend(struct dsa_switch *ds)
-{
-       int i, ret = 0;
-
-       /* Suspend slave network devices */
-       for (i = 0; i < ds->num_ports; i++) {
-               if (!dsa_is_port_initialized(ds, i))
-                       continue;
-
-               ret = dsa_slave_suspend(ds->ports[i].netdev);
-               if (ret)
-                       return ret;
-       }
-
-       if (ds->ops->suspend)
-               ret = ds->ops->suspend(ds);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_suspend);
-
-int dsa_switch_resume(struct dsa_switch *ds)
-{
-       int i, ret = 0;
-
-       if (ds->ops->resume)
-               ret = ds->ops->resume(ds);
-
-       if (ret)
-               return ret;
-
-       /* Resume slave network devices */
-       for (i = 0; i < ds->num_ports; i++) {
-               if (!dsa_is_port_initialized(ds, i))
-                       continue;
-
-               ret = dsa_slave_resume(ds->ports[i].netdev);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_resume);
-#endif
-
 /* platform driver init and cleanup *****************************************/
 static int dev_is_class(struct device *dev, void *class)
 {
index f3dad16613437c0c7ac3e9c7518a0929cddb3ca7..58925b6597de83e7d643fb9b1c7e992c9748ae1c 100644 (file)
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
                .type =       SOCK_DGRAM,
                .protocol =   IPPROTO_ICMP,
                .prot =       &ping_prot,
-               .ops =        &inet_dgram_ops,
+               .ops =        &inet_sockraw_ops,
                .flags =      INET_PROTOSW_REUSE,
        },
 
index d54345a06f720fb1cd7632a364aa7e7e19ff6216..e9f3386a528b4a792839d5e50894a68d3097eb42 100644 (file)
@@ -641,6 +641,32 @@ void arp_xmit(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(arp_xmit);
 
+static bool arp_is_garp(struct net *net, struct net_device *dev,
+                       int *addr_type, __be16 ar_op,
+                       __be32 sip, __be32 tip,
+                       unsigned char *sha, unsigned char *tha)
+{
+       bool is_garp = tip == sip;
+
+       /* Gratuitous ARP _replies_ also require target hwaddr to be
+        * the same as source.
+        */
+       if (is_garp && ar_op == htons(ARPOP_REPLY))
+               is_garp =
+                       /* IPv4 over IEEE 1394 doesn't provide target
+                        * hardware address field in its ARP payload.
+                        */
+                       tha &&
+                       !memcmp(tha, sha, dev->addr_len);
+
+       if (is_garp) {
+               *addr_type = inet_addr_type_dev_table(net, dev, sip);
+               if (*addr_type != RTN_UNICAST)
+                       is_garp = false;
+       }
+       return is_garp;
+}
+
 /*
  *     Process an arp request.
  */
@@ -837,29 +863,25 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
 
        n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
 
-       if (IN_DEV_ARP_ACCEPT(in_dev)) {
-               unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip);
+       addr_type = -1;
+       if (n || IN_DEV_ARP_ACCEPT(in_dev)) {
+               is_garp = arp_is_garp(net, dev, &addr_type, arp->ar_op,
+                                     sip, tip, sha, tha);
+       }
 
+       if (IN_DEV_ARP_ACCEPT(in_dev)) {
                /* Unsolicited ARP is not accepted by default.
                   It is possible that this option should be enabled for some
                   devices (strip is a candidate)
                 */
-               is_garp = tip == sip && addr_type == RTN_UNICAST;
-
-               /* Unsolicited ARP _replies_ also require target hwaddr to be
-                * the same as source.
-                */
-               if (is_garp && arp->ar_op == htons(ARPOP_REPLY))
-                       is_garp =
-                               /* IPv4 over IEEE 1394 doesn't provide target
-                                * hardware address field in its ARP payload.
-                                */
-                               tha &&
-                               !memcmp(tha, sha, dev->addr_len);
-
                if (!n &&
-                   ((arp->ar_op == htons(ARPOP_REPLY)  &&
-                               addr_type == RTN_UNICAST) || is_garp))
+                   (is_garp ||
+                    (arp->ar_op == htons(ARPOP_REPLY) &&
+                     (addr_type == RTN_UNICAST ||
+                      (addr_type < 0 &&
+                       /* postpone the calculation as late as possible */
+                       inet_addr_type_dev_table(net, dev, sip) ==
+                               RTN_UNICAST)))))
                        n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
        }
 
index 65cc02bd82bc87991a29135a58c059c7916a5a35..93322f895eab6136adbe22aa37a4eedc9687a0e4 100644 (file)
@@ -248,6 +248,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
        u8 *tail;
        u8 *vaddr;
        int nfrags;
+       int esph_offset;
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;
@@ -313,11 +314,13 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
        }
 
 cow:
+       esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
+
        nfrags = skb_cow_data(skb, tailen, &trailer);
        if (nfrags < 0)
                goto out;
        tail = skb_tail_pointer(trailer);
-       esp->esph = ip_esp_hdr(skb);
+       esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
 
 skip_cow:
        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
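
The esp_output_head() fix is a stale-pointer repair: skb_cow_data() may reallocate the packet head, which invalidates esp->esph, so the patch converts the pointer to an offset from the transport header before the call and rebuilds it afterwards. The same discipline in a self-contained sketch, with realloc() standing in for the head move:

    #include <stddef.h>
    #include <stdlib.h>

    struct buf { char *data; size_t len; };

    /* may move b->data, just as skb_cow_data() may move the skb head */
    static int grow(struct buf *b, size_t extra)
    {
            char *p = realloc(b->data, b->len + extra);

            if (!p)
                    return -1;
            b->data = p;
            b->len += extra;
            return 0;
    }

    static int append_trailer(struct buf *b, char **hdr)
    {
            size_t hdr_off = *hdr - b->data;  /* pointer -> offset first */

            if (grow(b, 16) < 0)
                    return -1;
            *hdr = b->data + hdr_off;         /* offset -> pointer after */
            return 0;
    }
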
index da449ddb8cc172bd9091c00057a69a095f98b56d..ad9ad4aab5da7c7d11c3b80edbdfcbdd3d7153fe 100644 (file)
@@ -203,6 +203,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
 static void free_fib_info_rcu(struct rcu_head *head)
 {
        struct fib_info *fi = container_of(head, struct fib_info, rcu);
+       struct dst_metrics *m;
 
        change_nexthops(fi) {
                if (nexthop_nh->nh_dev)
@@ -213,8 +214,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
                rt_fibinfo_free(&nexthop_nh->nh_rth_input);
        } endfor_nexthops(fi);
 
-       if (fi->fib_metrics != (u32 *) dst_default_metrics)
-               kfree(fi->fib_metrics);
+       m = fi->fib_metrics;
+       if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+               kfree(m);
        kfree(fi);
 }
 
@@ -971,11 +973,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
                        val = 255;
                if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
                        return -EINVAL;
-               fi->fib_metrics[type - 1] = val;
+               fi->fib_metrics->metrics[type - 1] = val;
        }
 
        if (ecn_ca)
-               fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+               fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
 
        return 0;
 }
@@ -1033,11 +1035,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                goto failure;
        fib_info_cnt++;
        if (cfg->fc_mx) {
-               fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+               fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
                if (!fi->fib_metrics)
                        goto failure;
+               atomic_set(&fi->fib_metrics->refcnt, 1);
        } else
-               fi->fib_metrics = (u32 *) dst_default_metrics;
+               fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
 
        fi->fib_net = net;
        fi->fib_protocol = cfg->fc_protocol;
@@ -1238,7 +1241,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
        if (fi->fib_priority &&
            nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
                goto nla_put_failure;
-       if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+       if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
                goto nla_put_failure;
 
        if (fi->fib_prefsrc &&
index 655d9eebe43e16a59102edcd3ea4bc177c6b341d..6883b3d4ba8f69de2cb924612d60f5671a219a84 100644 (file)
@@ -1385,8 +1385,12 @@ static void rt_add_uncached_list(struct rtable *rt)
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
 {
+       struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
        struct rtable *rt = (struct rtable *) dst;
 
+       if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
+               kfree(p);
+
        if (!list_empty(&rt->rt_uncached)) {
                struct uncached_list *ul = rt->rt_uncached_list;
 
@@ -1438,7 +1442,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
                        rt->rt_gateway = nh->nh_gw;
                        rt->rt_uses_gateway = 1;
                }
-               dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+               dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+               if (fi->fib_metrics != &dst_default_metrics) {
+                       rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+                       atomic_inc(&fi->fib_metrics->refcnt);
+               }
 #ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
 #endif
index 1e4c76d2b8278ba71d6cc2cf7ebfe483e241f76e..b5ea036ca78144b86622cb0944d0b840f7225ec5 100644 (file)
@@ -1084,9 +1084,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
+       struct sockaddr *uaddr = msg->msg_name;
        int err, flags;
 
-       if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+       if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+           (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+            uaddr->sa_family == AF_UNSPEC))
                return -EOPNOTSUPP;
        if (tp->fastopen_req)
                return -EALREADY; /* Another Fast Open is in progress */
@@ -1108,7 +1111,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
                }
        }
        flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
-       err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+       err = __inet_stream_connect(sk->sk_socket, uaddr,
                                    msg->msg_namelen, flags, 1);
        /* fastopen_req could already be freed in __inet_stream_connect
         * if the connection times out or gets rst
@@ -2320,6 +2323,10 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_set_ca_state(sk, TCP_CA_Open);
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
+       /* Initialize rcv_mss to TCP_MIN_MSS to avoid a division by 0
+        * in __tcp_select_window()
+        */
+       icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
        tcp_init_send_head(sk);
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
@@ -2374,9 +2381,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
        return 0;
 }
 
-static int tcp_repair_options_est(struct tcp_sock *tp,
+static int tcp_repair_options_est(struct sock *sk,
                struct tcp_repair_opt __user *optbuf, unsigned int len)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_repair_opt opt;
 
        while (len >= sizeof(opt)) {
@@ -2389,6 +2397,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
                switch (opt.opt_code) {
                case TCPOPT_MSS:
                        tp->rx_opt.mss_clamp = opt.opt_val;
+                       tcp_mtup_init(sk);
                        break;
                case TCPOPT_WINDOW:
                        {
@@ -2548,7 +2557,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                if (!tp->repair)
                        err = -EINVAL;
                else if (sk->sk_state == TCP_ESTABLISHED)
-                       err = tcp_repair_options_est(tp,
+                       err = tcp_repair_options_est(sk,
                                        (struct tcp_repair_opt __user *)optval,
                                        optlen);
                else
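
The fastopen hunk above rejects a destination whose family is AF_UNSPEC, which __inet_stream_connect() would otherwise treat as a disconnect request, and it checks msg_namelen before touching sa_family so a short address is never read past its end. The validation order in isolation, as a hedged sketch rather than the kernel entry point:

    #include <errno.h>
    #include <stddef.h>
    #include <sys/socket.h>

    /* Length check first, family check second: reading sa_family from
     * a buffer shorter than the field would be an overrun. */
    static int fastopen_addr_ok(const struct sockaddr *uaddr, size_t namelen)
    {
            if (uaddr && namelen >= sizeof(uaddr->sa_family) &&
                uaddr->sa_family == AF_UNSPEC)
                    return -EOPNOTSUPP;
            return 0;
    }
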
index 6e3c512054a60715e8e2d16ffedd12cba6a3d2d9..324c9bcc5456b499b59cef40838b4e9829119e13 100644 (file)
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
+       tcp_sk(sk)->prior_ssthresh = 0;
        if (icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
        if (tcp_ca_needs_ecn(sk))
index 37ac9de713c69af30ae50d03e53ee472a7520b98..8d772fea1ddecd427a66c18f34d50f969186f02a 100644 (file)
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
        struct ipv6hdr *ip6_hdr;
        struct ipv6_opt_hdr *hop;
        unsigned char buf[CALIPSO_MAX_BUFFER];
-       int len_delta, new_end, pad;
+       int len_delta, new_end, pad, payload;
        unsigned int start, end;
 
        ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
        if (ret_val < 0)
                return ret_val;
 
+       ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
+
        if (len_delta) {
                if (len_delta > 0)
                        skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
                        sizeof(*ip6_hdr) + start);
                skb_reset_network_header(skb);
                ip6_hdr = ipv6_hdr(skb);
+               payload = ntohs(ip6_hdr->payload_len);
+               ip6_hdr->payload_len = htons(payload + len_delta);
        }
 
        hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
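
Two corrections share this calipso hunk pair: ip6_hdr must be re-read because skb_cow() may have moved the headers (the same stale-pointer hazard as in the ESP fix above), and once the option area grows or shrinks by len_delta the IPv6 payload_len field has to follow suit. The length bookkeeping alone, as a sketch with an illustrative header type:

    #include <arpa/inet.h>
    #include <stdint.h>

    struct ipv6hdr_model { uint16_t payload_len; /* network byte order */ };

    /* After resizing the packet by len_delta bytes, keep the header's
     * length field consistent with the new payload size. */
    static void fix_payload_len(struct ipv6hdr_model *h, int len_delta)
    {
            int payload = ntohs(h->payload_len);

            h->payload_len = htons((uint16_t)(payload + len_delta));
    }
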
index 8d128ba79b66de52d4d60628fe8df6cc9bccbc3b..0c5b4caa19491eb04bc755032611c76f03008acb 100644 (file)
@@ -537,11 +537,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-       dsfield = ipv4_get_dsfield(iph);
-
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-               fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-                                         & IPV6_TCLASS_MASK;
+               dsfield = ipv4_get_dsfield(iph);
+       else
+               dsfield = ip6_tclass(t->parms.flowinfo);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6.flowi6_mark = skb->mark;
        else
@@ -598,9 +597,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-       dsfield = ipv6_get_dsfield(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+               dsfield = ipv6_get_dsfield(ipv6h);
+       else
+               dsfield = ip6_tclass(t->parms.flowinfo);
+
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
                fl6.flowlabel |= ip6_flowlabel(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
index 280268f1dd7b0972d7fadbcc9e28b043ceae423d..cdb3728faca7746d91e2430f6024f060a82b24fd 100644 (file)
@@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
                if (udpfrag) {
                        int err = ip6_find_1stfragopt(skb, &prevhdr);
-                       if (err < 0)
+                       if (err < 0) {
+                               kfree_skb_list(segs);
                                return ERR_PTR(err);
+                       }
                        fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
                        fptr->frag_off = htons(offset);
                        if (skb->next)
index d4a31becbd25dda895d7391e1e65c2de237bf2a3..bf8a58a1c32d83a9605844075da5815be23a6bf1 100644 (file)
@@ -1466,6 +1466,11 @@ alloc_new_skb:
                         */
                        alloclen += sizeof(struct frag_hdr);
 
+                       copy = datalen - transhdrlen - fraggap;
+                       if (copy < 0) {
+                               err = -EINVAL;
+                               goto error;
+                       }
                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len,
@@ -1515,13 +1520,9 @@ alloc_new_skb:
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }
-                       copy = datalen - transhdrlen - fraggap;
-
-                       if (copy < 0) {
-                               err = -EINVAL;
-                               kfree_skb(skb);
-                               goto error;
-                       } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+                       if (copy > 0 &&
+                           getfrag(from, data + transhdrlen, offset,
+                                   copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
index 6eb2ae507500b30959134aa3ed35c53c7b79aad9..9b37f9747fc6a6fbabb0740188bc98b5c95c41c4 100644 (file)
@@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
        if (!dst) {
 route_lookup:
+               /* add dsfield to flowlabel for route lookup */
+               fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
                dst = ip6_route_output(net, NULL, fl6);
 
                if (dst->error)
@@ -1196,7 +1199,7 @@ route_lookup:
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
-       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+       ip6_flow_hdr(ipv6h, dsfield,
                     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
        ipv6h->hop_limit = hop_limit;
        ipv6h->nexthdr = proto;
@@ -1231,8 +1234,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (tproto != IPPROTO_IPIP && tproto != 0)
                return -1;
 
-       dsfield = ipv4_get_dsfield(iph);
-
        if (t->parms.collect_md) {
                struct ip_tunnel_info *tun_info;
                const struct ip_tunnel_key *key;
@@ -1246,6 +1247,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPIP;
                fl6.daddr = key->u.ipv6.dst;
                fl6.flowlabel = key->label;
+               dsfield = ip6_tclass(key->label);
        } else {
                if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                        encap_limit = t->parms.encap_limit;
@@ -1254,8 +1256,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPIP;
 
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-                       fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-                                        & IPV6_TCLASS_MASK;
+                       dsfield = ipv4_get_dsfield(iph);
+               else
+                       dsfield = ip6_tclass(t->parms.flowinfo);
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                        fl6.flowi6_mark = skb->mark;
                else
@@ -1267,6 +1270,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
                return -1;
 
+       dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+
        skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
        err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1300,8 +1305,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
            ip6_tnl_addr_conflict(t, ipv6h))
                return -1;
 
-       dsfield = ipv6_get_dsfield(ipv6h);
-
        if (t->parms.collect_md) {
                struct ip_tunnel_info *tun_info;
                const struct ip_tunnel_key *key;
@@ -1315,6 +1318,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPV6;
                fl6.daddr = key->u.ipv6.dst;
                fl6.flowlabel = key->label;
+               dsfield = ip6_tclass(key->label);
        } else {
                offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
                /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
@@ -1337,7 +1341,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPV6;
 
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-                       fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
+                       dsfield = ipv6_get_dsfield(ipv6h);
+               else
+                       dsfield = ip6_tclass(t->parms.flowinfo);
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
                        fl6.flowlabel |= ip6_flowlabel(ipv6h);
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
@@ -1351,6 +1357,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
                return -1;
 
+       dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+
        skb_set_inner_ipproto(skb, IPPROTO_IPV6);
 
        err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
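
All of the ip6_gre and ip6_tunnel hunks converge on one recipe: take the traffic class from the inner packet when IP6_TNL_F_USE_ORIG_TCLASS is set, otherwise from the tunnel's configured flowinfo, and fold in the inner ECN bits only at transmit time. The flowinfo extraction, sketched against the RFC 2460 bit layout with illustrative helper names:

    #include <stdint.h>

    /* In the 32-bit IPv6 flow word (host byte order), the traffic
     * class occupies bits 20..27; the low 20 bits are the flow label. */
    static uint8_t tclass_of(uint32_t flowinfo)
    {
            return (flowinfo >> 20) & 0xff;
    }

    static uint8_t pick_dsfield(int use_orig_tclass, uint8_t inner_dsfield,
                                uint32_t tunnel_flowinfo)
    {
            return use_orig_tclass ? inner_dsfield
                                   : tclass_of(tunnel_flowinfo);
    }
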
index 9b522fa90e6d8f4a87ebed7cf574a36ceea89c61..ac826dd338ff0825eaf0d2d74cee92d008e018bb 100644 (file)
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
        .type =      SOCK_DGRAM,
        .protocol =  IPPROTO_ICMPV6,
        .prot =      &pingv6_prot,
-       .ops =       &inet6_dgram_ops,
+       .ops =       &inet6_sockraw_ops,
        .flags =     INET_PROTOSW_REUSE,
 };
 
index 1f992d9e261d8b75226659a4cead95f8dc04dc4f..60be012fe7085cc7a199e84333cef5ee95ed1f04 100644 (file)
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
 #endif /* CONFIG_PROC_FS */
 
 /* Same as inet6_dgram_ops, sans udp_poll.  */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
        .release           = inet6_release,
index 0e015906f9ca91e11d3e9e124c532c6a7cb04c5d..07d36573f50b9451e4c2bfee331ac2c023791a7a 100644 (file)
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
        iph = ipv6_hdr(skb);
 
        hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+       if (hdr_len < 0)
+               return hdr_len;
        skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
index 7a92c0f3191250118ce3572ca91ee9116460bce8..9ad07a91708ef7a1008d469766ab39b9b882883f 100644 (file)
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));
 
        hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+       if (hdr_len < 0)
+               return hdr_len;
        skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
index c1950bb14735bc914b53c0476342f2679ad50b87..512dc43d0ce6814f0a6d0507805025d75234eece 100644 (file)
@@ -3285,7 +3285,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
                p += pol->sadb_x_policy_len*8;
                sec_ctx = (struct sadb_x_sec_ctx *)p;
                if (len < pol->sadb_x_policy_len*8 +
-                   sec_ctx->sadb_x_sec_len) {
+                   sec_ctx->sadb_x_sec_len*8) {
                        *dir = -EINVAL;
                        goto out;
                }
index 8364fe5b59e4ca01ef8d05b1038dbe96fedf657b..c38d16f22d2a7ff265b8729d43b164312598fd9f 100644 (file)
@@ -311,6 +311,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        int rc = -EINVAL;
 
        dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
+
+       lock_sock(sk);
        if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
                goto out;
        rc = -EAFNOSUPPORT;
@@ -382,6 +384,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 out_put:
        llc_sap_put(sap);
 out:
+       release_sock(sk);
        return rc;
 }
 
index 60e2a62f7bef2fbb014f3a40403cf498a75d429c..cf2392b2ac717972e4354346fd086ec802370de3 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
        ieee80211_agg_start_txq(sta, tid, true);
 }
 
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+                             struct tid_ampdu_tx *tid_tx)
 {
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
-       struct sta_info *sta;
-       struct tid_ampdu_tx *tid_tx;
 
-       trace_api_start_tx_ba_cb(sdata, ra, tid);
+       if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+               return;
+
+       if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
+               ieee80211_agg_tx_operational(local, sta, tid);
+}
+
+static struct tid_ampdu_tx *
+ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
+                       const u8 *ra, u16 tid, struct sta_info **sta)
+{
+       struct tid_ampdu_tx *tid_tx;
 
        if (tid >= IEEE80211_NUM_TIDS) {
                ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
                       tid, IEEE80211_NUM_TIDS);
-               return;
+               return NULL;
        }
 
-       mutex_lock(&local->sta_mtx);
-       sta = sta_info_get_bss(sdata, ra);
-       if (!sta) {
-               mutex_unlock(&local->sta_mtx);
+       *sta = sta_info_get_bss(sdata, ra);
+       if (!*sta) {
                ht_dbg(sdata, "Could not find station: %pM\n", ra);
-               return;
+               return NULL;
        }
 
-       mutex_lock(&sta->ampdu_mlme.mtx);
-       tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+       tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
 
-       if (WARN_ON(!tid_tx)) {
+       if (WARN_ON(!tid_tx))
                ht_dbg(sdata, "addBA was not requested!\n");
-               goto unlock;
-       }
 
-       if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
-               goto unlock;
-
-       if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
-               ieee80211_agg_tx_operational(local, sta, tid);
-
- unlock:
-       mutex_unlock(&sta->ampdu_mlme.mtx);
-       mutex_unlock(&local->sta_mtx);
+       return tid_tx;
 }
 
 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_ra_tid *ra_tid;
-       struct sk_buff *skb = dev_alloc_skb(0);
+       struct sta_info *sta;
+       struct tid_ampdu_tx *tid_tx;
 
-       if (unlikely(!skb))
-               return;
+       trace_api_start_tx_ba_cb(sdata, ra, tid);
 
-       ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-       memcpy(&ra_tid->ra, ra, ETH_ALEN);
-       ra_tid->tid = tid;
+       rcu_read_lock();
+       tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+       if (!tid_tx)
+               goto out;
 
-       skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
-       skb_queue_tail(&sdata->skb_queue, skb);
-       ieee80211_queue_work(&local->hw, &sdata->work);
+       set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
+       ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
 
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+                            struct tid_ampdu_tx *tid_tx)
 {
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-       struct ieee80211_local *local = sdata->local;
-       struct sta_info *sta;
-       struct tid_ampdu_tx *tid_tx;
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
        bool send_delba = false;
 
-       trace_api_stop_tx_ba_cb(sdata, ra, tid);
-
-       if (tid >= IEEE80211_NUM_TIDS) {
-               ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-                      tid, IEEE80211_NUM_TIDS);
-               return;
-       }
-
-       ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
-
-       mutex_lock(&local->sta_mtx);
-
-       sta = sta_info_get_bss(sdata, ra);
-       if (!sta) {
-               ht_dbg(sdata, "Could not find station: %pM\n", ra);
-               goto unlock;
-       }
+       ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
+              sta->sta.addr, tid);
 
-       mutex_lock(&sta->ampdu_mlme.mtx);
        spin_lock_bh(&sta->lock);
-       tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-       if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+       if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
                ht_dbg(sdata,
                       "unexpected callback to A-MPDU stop for %pM tid %d\n",
                       sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
        spin_unlock_bh(&sta->lock);
 
        if (send_delba)
-               ieee80211_send_delba(sdata, ra, tid,
+               ieee80211_send_delba(sdata, sta->sta.addr, tid,
                        WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
-
-       mutex_unlock(&sta->ampdu_mlme.mtx);
- unlock:
-       mutex_unlock(&local->sta_mtx);
 }
 
 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_ra_tid *ra_tid;
-       struct sk_buff *skb = dev_alloc_skb(0);
+       struct sta_info *sta;
+       struct tid_ampdu_tx *tid_tx;
 
-       if (unlikely(!skb))
-               return;
+       trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
-       ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-       memcpy(&ra_tid->ra, ra, ETH_ALEN);
-       ra_tid->tid = tid;
+       rcu_read_lock();
+       tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+       if (!tid_tx)
+               goto out;
 
-       skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
-       skb_queue_tail(&sdata->skb_queue, skb);
-       ieee80211_queue_work(&local->hw, &sdata->work);
+       set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
+       ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
 
index f4a52877356349abed96d0a77d6ae75f301181e4..6ca5442b1e03b18aa81764089bf74c002d337690 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright 2017      Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 {
        int i;
 
-       cancel_work_sync(&sta->ampdu_mlme.work);
-
        for (i = 0; i <  IEEE80211_NUM_TIDS; i++) {
                __ieee80211_stop_tx_ba_session(sta, i, reason);
                __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
                                               reason != AGG_STOP_DESTROY_STA &&
                                               reason != AGG_STOP_PEER_REQUEST);
        }
+
+       /* stopping might queue the work again - so cancel only afterwards */
+       cancel_work_sync(&sta->ampdu_mlme.work);
 }
 
 void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
                spin_unlock_bh(&sta->lock);
 
                tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-               if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
-                                                &tid_tx->state))
+               if (!tid_tx)
+                       continue;
+
+               if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
+                       ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
+               if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
                        ___ieee80211_stop_tx_ba_session(sta, tid,
                                                        AGG_STOP_LOCAL_REQUEST);
+               if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+                       ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
        }
        mutex_unlock(&sta->ampdu_mlme.mtx);
 }
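
The aggregation rework retires the zero-size-skb trick, where an empty skb's cb carried an RA/TID pair for the interface work to dequeue, in favour of two per-TID state bits: the irqsafe entry points only set HT_AGG_STATE_START_CB or HT_AGG_STATE_STOP_CB and schedule the BA session work, which then runs the callbacks in sleepable context under the proper locks. The deferral pattern in miniature, with C11 atomics standing in for the kernel bit ops:

    #include <stdatomic.h>

    enum { START_CB = 0, STOP_CB = 1 };     /* illustrative bit numbers */

    static atomic_uint agg_state;

    /* safe from interrupt context: mark the request and, in the real
     * code, queue the work item */
    static void start_cb_irqsafe(void)
    {
            atomic_fetch_or(&agg_state, 1u << START_CB);
    }

    /* process context: test-and-clear each bit, then do the real work */
    static void ba_session_work(void)
    {
            unsigned int prev =
                    atomic_fetch_and(&agg_state, ~(1u << START_CB));

            if (prev & (1u << START_CB)) {
                    /* run the start callback here */
            }
    }
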
index f8f6c148f5545feeb78c16ca6f33ebf5e02d0ab5..665501ac358f8d83630f2727fe6249dcfeeb9689 100644 (file)
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
 
 enum sdata_queue_type {
        IEEE80211_SDATA_QUEUE_TYPE_FRAME        = 0,
-       IEEE80211_SDATA_QUEUE_AGG_START         = 1,
-       IEEE80211_SDATA_QUEUE_AGG_STOP          = 2,
        IEEE80211_SDATA_QUEUE_RX_AGG_START      = 3,
        IEEE80211_SDATA_QUEUE_RX_AGG_STOP       = 4,
 };
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
        return local->hw.wiphy->bands[band];
 }
 
-/* this struct represents 802.11n's RA/TID combination */
-struct ieee80211_ra_tid {
-       u8 ra[ETH_ALEN];
-       u16 tid;
-};
-
 /* this struct holds the value parsing from channel switch IE  */
 struct ieee80211_csa_ie {
        struct cfg80211_chan_def chandef;
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                   enum ieee80211_agg_stop_reason reason);
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                    enum ieee80211_agg_stop_reason reason);
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+                             struct tid_ampdu_tx *tid_tx);
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+                            struct tid_ampdu_tx *tid_tx);
 void ieee80211_ba_session_work(struct work_struct *work);
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
index 3bd5b81f5d81ec7d73686043c2683630e24ecde4..8fae1a72e6a7c7ea4f71ec3a3beb215b987a715f 100644 (file)
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
        struct sta_info *sta;
-       struct ieee80211_ra_tid *ra_tid;
        struct ieee80211_rx_agg *rx_agg;
 
        if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
        while ((skb = skb_dequeue(&sdata->skb_queue))) {
                struct ieee80211_mgmt *mgmt = (void *)skb->data;
 
-               if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
-                       ra_tid = (void *)&skb->cb;
-                       ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
-                                                ra_tid->tid);
-               } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
-                       ra_tid = (void *)&skb->cb;
-                       ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
-                                               ra_tid->tid);
-               } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
+               if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
                        rx_agg = (void *)&skb->cb;
                        mutex_lock(&local->sta_mtx);
                        sta = sta_info_get_bss(sdata, rx_agg->addr);
index 35f4c7d7a50089e3998ff9860e86f6d26e1da600..1f75280ba26c78b3ad9864d0b63305cbba43f8fe 100644 (file)
@@ -2492,7 +2492,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
                if (is_multicast_ether_addr(hdr->addr1)) {
                        mpp_addr = hdr->addr3;
                        proxied_addr = mesh_hdr->eaddr1;
-               } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
+               } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
+                           MESH_FLAGS_AE_A5_A6) {
                        /* has_a4 already checked in ieee80211_rx_mesh_check */
                        mpp_addr = hdr->addr4;
                        proxied_addr = mesh_hdr->eaddr2;
index 7cdf7a835bb01e8fade3b9d9bb6efaa19158f72b..403e3cc58b573dc511c8ba399ddefdc7e8e24ba0 100644 (file)
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                        struct ieee80211_sta_rx_stats *cpurxs;
 
                        cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
-                       sinfo->rx_packets += cpurxs->dropped;
+                       sinfo->rx_dropped_misc += cpurxs->dropped;
                }
        }
 
index 5609cacb20d5f31e02ba877f88a6f0290e18ce8b..ea0747d6a6da194fec9116dc14b58a543f1accf2 100644 (file)
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
 #define HT_AGG_STATE_STOPPING          3
 #define HT_AGG_STATE_WANT_START                4
 #define HT_AGG_STATE_WANT_STOP         5
+#define HT_AGG_STATE_START_CB          6
+#define HT_AGG_STATE_STOP_CB           7
 
 enum ieee80211_agg_stop_reason {
        AGG_STOP_DECLINED,
index 257ec66009da2dd7010d063c0ad29dbb9a32564e..7b05fd1497ceddea47c2c7917f5ee58f6ff2d560 100644 (file)
@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
                                continue;
                        alive++;
                        nh_flags &= ~flags;
-                       WRITE_ONCE(nh->nh_flags, flags);
+                       WRITE_ONCE(nh->nh_flags, nh_flags);
                } endfor_nexthops(rt);
 
                WRITE_ONCE(rt->rt_nhn_alive, alive);
index d2d7bdf1d5104b6e68284bbbb533c30f159844e2..ad99c1ceea6f42bf3e52500a4452f7f74e730be5 100644 (file)
@@ -849,10 +849,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 {
        unsigned int verdict = NF_DROP;
 
-       if (IP_VS_FWD_METHOD(cp) != 0) {
-               pr_err("shouldn't reach here, because the box is on the "
-                      "half connection in the tun/dr module.\n");
-       }
+       if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+               goto ignore_cp;
 
        /* Ensure the checksum is correct */
        if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -886,6 +884,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
                ip_vs_notrack(skb);
        else
                ip_vs_update_conntrack(skb, cp, 0);
+
+ignore_cp:
        verdict = NF_ACCEPT;
 
 out:
@@ -1385,8 +1385,11 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
         */
        cp = pp->conn_out_get(ipvs, af, skb, &iph);
 
-       if (likely(cp))
+       if (likely(cp)) {
+               if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+                       goto ignore_cp;
                return handle_response(af, skb, pd, cp, &iph, hooknum);
+       }
 
        /* Check for real-server-started requests */
        if (atomic_read(&ipvs->conn_out_counter)) {
@@ -1444,9 +1447,15 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
                        }
                }
        }
+
+out:
        IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
                      "ip_vs_out: packet continues traversal as normal");
        return NF_ACCEPT;
+
+ignore_cp:
+       __ip_vs_conn_put(cp);
+       goto out;
 }
 
 /*
index 3a60efa7799b2e4569af35ce943c67fc354dc68a..7f6100ca63be6dd4f37c24852fd16b9a71b8a823 100644 (file)
@@ -174,6 +174,10 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 #endif
        if (h != NULL && !try_module_get(h->me))
                h = NULL;
+       if (h != NULL && !refcount_inc_not_zero(&h->refcnt)) {
+               module_put(h->me);
+               h = NULL;
+       }
 
        rcu_read_unlock();
 
@@ -181,6 +185,13 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
 
+void nf_conntrack_helper_put(struct nf_conntrack_helper *helper)
+{
+       refcount_dec(&helper->refcnt);
+       module_put(helper->me);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_helper_put);
+
 struct nf_conn_help *
 nf_ct_helper_ext_add(struct nf_conn *ct,
                     struct nf_conntrack_helper *helper, gfp_t gfp)
@@ -417,6 +428,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
                        }
                }
        }
+       refcount_set(&me->refcnt, 1);
        hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
        nf_ct_helper_count++;
 out:
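
Helpers now carry an object refcount on top of the owning module's reference: acquisition takes the module reference first and then tries the refcount, unwinding the module reference when the object is already dying, and nf_conntrack_helper_put() above releases in the mirror order. A standalone model of the pairing, with C11 atomics and stand-in module hooks:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct helper { atomic_int refcnt; };

    static bool module_get(void) { return true; }   /* stand-in */
    static void module_put(void) { }                /* stand-in */

    /* increment only while the count is still nonzero */
    static bool refcount_inc_not_zero(atomic_int *r)
    {
            int v = atomic_load(r);

            while (v != 0)
                    if (atomic_compare_exchange_weak(r, &v, v + 1))
                            return true;
            return false;
    }

    static struct helper *helper_get(struct helper *h)
    {
            if (!module_get())
                    return NULL;
            if (!refcount_inc_not_zero(&h->refcnt)) {
                    module_put();           /* object dying: unwind */
                    return NULL;
            }
            return h;
    }

    static void helper_put(struct helper *h)
    {
            atomic_fetch_sub(&h->refcnt, 1);
            module_put();
    }
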
index dcf561b5c97a47e627ee00649d756635db0e6fb3..a8be9b72e6cd2ca34166bba49a532f4f92e86e9e 100644 (file)
@@ -45,6 +45,8 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_l4proto.h>
@@ -888,8 +890,13 @@ restart:
        }
 out:
        local_bh_enable();
-       if (last)
+       if (last) {
+               /* nf ct hash resize happened; clear the leftover reference. */
+               if ((struct nf_conn *)cb->args[1] == last)
+                       cb->args[1] = 0;
+
                nf_ct_put(last);
+       }
 
        while (i) {
                i--;
@@ -1007,9 +1014,8 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
 
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
-                     struct nf_conntrack_tuple *tuple,
-                     enum ctattr_type type, u_int8_t l3num,
-                     struct nf_conntrack_zone *zone)
+                     struct nf_conntrack_tuple *tuple, u32 type,
+                     u_int8_t l3num, struct nf_conntrack_zone *zone)
 {
        struct nlattr *tb[CTA_TUPLE_MAX+1];
        int err;
@@ -1828,6 +1834,8 @@ ctnetlink_create_conntrack(struct net *net,
        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
        nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
        nf_ct_labels_ext_add(ct);
+       nfct_seqadj_ext_add(ct);
+       nfct_synproxy_ext_add(ct);
 
        /* we must add conntrack extensions before confirmation. */
        ct->status |= IPS_CONFIRMED;
@@ -2447,7 +2455,7 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = {
 
 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
                                    const struct nf_conntrack_tuple *tuple,
-                                   enum ctattr_expect type)
+                                   u32 type)
 {
        struct nlattr *nest_parms;
 
index 13875d599a85713bbfeb2fee7b8978fa498a10ed..1c5b14a6cab369591bd13e22b284a0737ad75c2e 100644 (file)
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
                      u8 pf, unsigned int hooknum)
 {
        const struct sctphdr *sh;
-       struct sctphdr _sctph;
        const char *logmsg;
 
-       sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
-       if (!sh) {
+       if (skb->len < dataoff + sizeof(struct sctphdr)) {
                logmsg = "nf_ct_sctp: short packet ";
                goto out_invalid;
        }
        if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
            skb->ip_summed == CHECKSUM_NONE) {
+               if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+                       logmsg = "nf_ct_sctp: failed to read header ";
+                       goto out_invalid;
+               }
+               sh = (const struct sctphdr *)(skb->data + dataoff);
                if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
                        logmsg = "nf_ct_sctp: bad CRC ";
                        goto out_invalid;
index b48d6b5aae8a87d4ea69cae0e025739ebe3f1658..6c72922d20caee83f498cb02cdce0c46899a1c22 100644 (file)
@@ -409,6 +409,10 @@ nf_nat_setup_info(struct nf_conn *ct,
 {
        struct nf_conntrack_tuple curr_tuple, new_tuple;
 
+       /* Can't setup nat info for confirmed ct. */
+       if (nf_ct_is_confirmed(ct))
+               return NF_ACCEPT;
+
        NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
                     maniptype == NF_NAT_MANIP_DST);
        BUG_ON(nf_nat_initialized(ct, maniptype));
@@ -562,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
         * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
         * will delete the entry from an already-freed table.
         */
-       ct->status &= ~IPS_NAT_DONE_MASK;
+       clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
        rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
                        nf_nat_bysource_params);
 
index 5592250297402fe6e272f3213efa7e02ab230485..da314be0c048720172bbd153cd2f730b486603ce 100644 (file)
@@ -3367,35 +3367,50 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
        return nf_tables_fill_setelem(args->skb, set, elem);
 }
 
+struct nft_set_dump_ctx {
+       const struct nft_set    *set;
+       struct nft_ctx          ctx;
+};
+
 static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct nft_set_dump_ctx *dump_ctx = cb->data;
        struct net *net = sock_net(skb->sk);
-       u8 genmask = nft_genmask_cur(net);
+       struct nft_af_info *afi;
+       struct nft_table *table;
        struct nft_set *set;
        struct nft_set_dump_args args;
-       struct nft_ctx ctx;
-       struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
+       bool set_found = false;
        struct nfgenmsg *nfmsg;
        struct nlmsghdr *nlh;
        struct nlattr *nest;
        u32 portid, seq;
-       int event, err;
+       int event;
 
-       err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla,
-                         NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy,
-                         NULL);
-       if (err < 0)
-               return err;
+       rcu_read_lock();
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
+               if (afi != dump_ctx->ctx.afi)
+                       continue;
 
-       err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh,
-                                        (void *)nla, genmask);
-       if (err < 0)
-               return err;
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       if (table != dump_ctx->ctx.table)
+                               continue;
 
-       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
-                                  genmask);
-       if (IS_ERR(set))
-               return PTR_ERR(set);
+                       list_for_each_entry_rcu(set, &table->sets, list) {
+                               if (set == dump_ctx->set) {
+                                       set_found = true;
+                                       break;
+                               }
+                       }
+                       break;
+               }
+               break;
+       }
+
+       if (!set_found) {
+               rcu_read_unlock();
+               return -ENOENT;
+       }
 
        event  = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM);
        portid = NETLINK_CB(cb->skb).portid;
@@ -3407,11 +3422,11 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
                goto nla_put_failure;
 
        nfmsg = nlmsg_data(nlh);
-       nfmsg->nfgen_family = ctx.afi->family;
+       nfmsg->nfgen_family = afi->family;
        nfmsg->version      = NFNETLINK_V0;
-       nfmsg->res_id       = htons(ctx.net->nft.base_seq & 0xffff);
+       nfmsg->res_id       = htons(net->nft.base_seq & 0xffff);
 
-       if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name))
+       if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name))
                goto nla_put_failure;
        if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
                goto nla_put_failure;
@@ -3422,12 +3437,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 
        args.cb                 = cb;
        args.skb                = skb;
-       args.iter.genmask       = nft_genmask_cur(ctx.net);
+       args.iter.genmask       = nft_genmask_cur(net);
        args.iter.skip          = cb->args[0];
        args.iter.count         = 0;
        args.iter.err           = 0;
        args.iter.fn            = nf_tables_dump_setelem;
-       set->ops->walk(&ctx, set, &args.iter);
+       set->ops->walk(&dump_ctx->ctx, set, &args.iter);
+       rcu_read_unlock();
 
        nla_nest_end(skb, nest);
        nlmsg_end(skb, nlh);
@@ -3441,9 +3457,16 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
        return skb->len;
 
 nla_put_failure:
+       rcu_read_unlock();
        return -ENOSPC;
 }
 
+static int nf_tables_dump_set_done(struct netlink_callback *cb)
+{
+       kfree(cb->data);
+       return 0;
+}
+
 static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
                                struct sk_buff *skb, const struct nlmsghdr *nlh,
                                const struct nlattr * const nla[])
@@ -3465,7 +3488,18 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
                        .dump = nf_tables_dump_set,
+                       .done = nf_tables_dump_set_done,
                };
+               struct nft_set_dump_ctx *dump_ctx;
+
+               dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_KERNEL);
+               if (!dump_ctx)
+                       return -ENOMEM;
+
+               dump_ctx->set = set;
+               dump_ctx->ctx = ctx;
+
+               c.data = dump_ctx;
                return netlink_dump_start(nlsk, skb, nlh, &c);
        }
        return -EOPNOTSUPP;
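
A netlink dump spans multiple recvmsg() calls, so per-dump state cannot live on the caller's stack: the hunk above allocates a context, hands it to netlink_dump_start() through c.data, and frees it exactly once in the .done callback. A userspace sketch of that ownership rule, with plain function pointers standing in for struct netlink_dump_control:

#include <stdio.h>
#include <stdlib.h>

struct dump_ctx { int cursor; };            /* per-dump state */

struct dump_control {
        int  (*dump)(struct dump_ctx *);    /* called repeatedly */
        void (*done)(struct dump_ctx *);    /* called exactly once at the end */
};

static int emit_batch(struct dump_ctx *ctx)
{
        return ctx->cursor++ < 3;           /* pretend there are 3 batches */
}

static void release(struct dump_ctx *ctx)
{
        free(ctx);                          /* the .done hook owns the free */
}

int main(void)
{
        struct dump_control c = { .dump = emit_batch, .done = release };
        struct dump_ctx *ctx = calloc(1, sizeof(*ctx));

        if (!ctx)
                return 1;
        while (c.dump(ctx))
                printf("batch %d\n", ctx->cursor);
        c.done(ctx);
        return 0;
}
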
@@ -3593,9 +3627,9 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
 {
        struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
 
-       nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE);
+       nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE);
        if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
-               nft_data_uninit(nft_set_ext_data(ext), set->dtype);
+               nft_data_release(nft_set_ext_data(ext), set->dtype);
        if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
                nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
        if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
@@ -3604,6 +3638,18 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
 }
 EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
 
+/* Only called from the commit path; nft_set_elem_deactivate() already deals with
+ * the refcounting from the preparation phase.
+ */
+static void nf_tables_set_elem_destroy(const struct nft_set *set, void *elem)
+{
+       struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
+
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+               nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
+       kfree(elem);
+}
+
 static int nft_setelem_parse_flags(const struct nft_set *set,
                                   const struct nlattr *attr, u32 *flags)
 {
@@ -3815,9 +3861,9 @@ err4:
        kfree(elem.priv);
 err3:
        if (nla[NFTA_SET_ELEM_DATA] != NULL)
-               nft_data_uninit(&data, d2.type);
+               nft_data_release(&data, d2.type);
 err2:
-       nft_data_uninit(&elem.key.val, d1.type);
+       nft_data_release(&elem.key.val, d1.type);
 err1:
        return err;
 }
@@ -3862,6 +3908,53 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
        return err;
 }
 
+/**
+ *     nft_data_hold - hold a nft_data item
+ *
+ *     @data: struct nft_data to hold
+ *     @type: type of data
+ *
+ *     Hold a nft_data item. NFT_DATA_VALUE types can be silently discarded;
+ *     NFT_DATA_VERDICT bumps the reference to chains in case of NFT_JUMP and
+ *     NFT_GOTO verdicts. This function must be called on active data objects
+ *     from the second phase of the commit protocol.
+ */
+static void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
+{
+       if (type == NFT_DATA_VERDICT) {
+               switch (data->verdict.code) {
+               case NFT_JUMP:
+               case NFT_GOTO:
+                       data->verdict.chain->use++;
+                       break;
+               }
+       }
+}
+
+static void nft_set_elem_activate(const struct net *net,
+                                 const struct nft_set *set,
+                                 struct nft_set_elem *elem)
+{
+       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+               nft_data_hold(nft_set_ext_data(ext), set->dtype);
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+               (*nft_set_ext_obj(ext))->use++;
+}
+
+static void nft_set_elem_deactivate(const struct net *net,
+                                   const struct nft_set *set,
+                                   struct nft_set_elem *elem)
+{
+       const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+               nft_data_release(nft_set_ext_data(ext), set->dtype);
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+               (*nft_set_ext_obj(ext))->use--;
+}
+
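
Deleting a set element is a two-phase transaction: nft_set_elem_deactivate() drops the data and object references when the deletion is queued, and nft_set_elem_activate() takes them back if the transaction aborts, so the use counts balance on every path. A toy sketch of that prepare/abort symmetry (plain counters, not nft types):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct chain { int use; };

static void deactivate(struct chain *c) { c->use--; }  /* queue the deletion */
static void activate(struct chain *c)   { c->use++; }  /* abort: undo it */

int main(void)
{
        struct chain c = { .use = 1 };
        bool abort_txn = true;

        deactivate(&c);              /* preparation phase */
        if (abort_txn)
                activate(&c);        /* abort restores the reference */
        assert(c.use == 1);          /* invariant: balanced either way */
        printf("use = %d\n", c.use);
        return 0;
}
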
 static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
                           const struct nlattr *attr)
 {
@@ -3927,6 +4020,8 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
        kfree(elem.priv);
        elem.priv = priv;
 
+       nft_set_elem_deactivate(ctx->net, set, &elem);
+
        nft_trans_elem(trans) = elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
        return 0;
@@ -3936,7 +4031,7 @@ err4:
 err3:
        kfree(elem.priv);
 err2:
-       nft_data_uninit(&elem.key.val, desc.type);
+       nft_data_release(&elem.key.val, desc.type);
 err1:
        return err;
 }
@@ -4743,8 +4838,8 @@ static void nf_tables_commit_release(struct nft_trans *trans)
                nft_set_destroy(nft_trans_set(trans));
                break;
        case NFT_MSG_DELSETELEM:
-               nft_set_elem_destroy(nft_trans_elem_set(trans),
-                                    nft_trans_elem(trans).priv, true);
+               nf_tables_set_elem_destroy(nft_trans_elem_set(trans),
+                                          nft_trans_elem(trans).priv);
                break;
        case NFT_MSG_DELOBJ:
                nft_obj_destroy(nft_trans_obj(trans));
@@ -4979,6 +5074,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
                case NFT_MSG_DELSETELEM:
                        te = (struct nft_trans_elem *)trans->data;
 
+                       nft_set_elem_activate(net, te->set, &te->elem);
                        te->set->ops->activate(net, te->set, &te->elem);
                        te->set->ndeact--;
 
@@ -5464,7 +5560,7 @@ int nft_data_init(const struct nft_ctx *ctx,
 EXPORT_SYMBOL_GPL(nft_data_init);
 
 /**
- *     nft_data_uninit - release a nft_data item
+ *     nft_data_release - release a nft_data item
  *
  *     @data: struct nft_data to release
  *     @type: type of data
@@ -5472,7 +5568,7 @@ EXPORT_SYMBOL_GPL(nft_data_init);
  *     Release a nft_data item. NFT_DATA_VALUE types can be silently discarded;
  *     all others need to be released by calling this function.
  */
-void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
+void nft_data_release(const struct nft_data *data, enum nft_data_types type)
 {
        if (type < NFT_DATA_VERDICT)
                return;
@@ -5483,7 +5579,7 @@ void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
                WARN_ON(1);
        }
 }
-EXPORT_SYMBOL_GPL(nft_data_uninit);
+EXPORT_SYMBOL_GPL(nft_data_release);
 
 int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
                  enum nft_data_types type, unsigned int len)
index 950bf6eadc6578516ac92b50427fe682cba3976d..be678a323598c3237a2cae09e4e3ed4bdea46614 100644 (file)
@@ -686,6 +686,7 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
                tuple_set = true;
        }
 
+       ret = -ENOENT;
        list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
                cur = &nlcth->helper;
                j++;
@@ -699,16 +700,20 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
                     tuple.dst.protonum != cur->tuple.dst.protonum))
                        continue;
 
-               found = true;
-               nf_conntrack_helper_unregister(cur);
-               kfree(cur->expect_policy);
+               if (refcount_dec_if_one(&cur->refcnt)) {
+                       found = true;
+                       nf_conntrack_helper_unregister(cur);
+                       kfree(cur->expect_policy);
 
-               list_del(&nlcth->list);
-               kfree(nlcth);
+                       list_del(&nlcth->list);
+                       kfree(nlcth);
+               } else {
+                       ret = -EBUSY;
+               }
        }
 
        /* Make sure we return success if we flush and there are no helpers */
-       return (found || j == 0) ? 0 : -ENOENT;
+       return (found || j == 0) ? 0 : ret;
 }
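
Unregistering a helper is only safe when nobody else holds a reference, so the delete path above uses refcount_dec_if_one(): the count drops to zero only if it was exactly one, and -EBUSY is reported otherwise. A C11 sketch of that primitive built on a compare-and-swap (the kernel's refcount_t is more elaborate; this is just the core idea):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool refcount_dec_if_one(atomic_int *r)
{
        int one = 1;
        /* 1 -> 0 transition only; fails and leaves *r alone otherwise */
        return atomic_compare_exchange_strong(r, &one, 0);
}

int main(void)
{
        atomic_int refs;

        atomic_store(&refs, 2);
        printf("busy:  %d\n", refcount_dec_if_one(&refs));  /* 0: still in use */
        atomic_store(&refs, 1);
        printf("freed: %d\n", refcount_dec_if_one(&refs));  /* 1: safe to free */
        return 0;
}
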
 
 static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
index 877d9acd91ef5c616c43d00f265facbfe4c2d334..fff8073e2a5692c14037a77c5d8151cf0c1bbcb0 100644 (file)
@@ -83,17 +83,26 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
                            tb[NFTA_BITWISE_MASK]);
        if (err < 0)
                return err;
-       if (d1.len != priv->len)
-               return -EINVAL;
+       if (d1.len != priv->len) {
+               err = -EINVAL;
+               goto err1;
+       }
 
        err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2,
                            tb[NFTA_BITWISE_XOR]);
        if (err < 0)
-               return err;
-       if (d2.len != priv->len)
-               return -EINVAL;
+               goto err1;
+       if (d2.len != priv->len) {
+               err = -EINVAL;
+               goto err2;
+       }
 
        return 0;
+err2:
+       nft_data_release(&priv->xor, d2.type);
+err1:
+       nft_data_release(&priv->mask, d1.type);
+       return err;
 }
 
 static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
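
The nft_bitwise_init() fix above is the usual kernel error-unwind ladder: each acquired resource gets a label, and a later failure jumps to the label that releases everything taken so far, in reverse order. A minimal sketch of the ladder with plain malloc()/free():

#include <stdio.h>
#include <stdlib.h>

static int setup_two(char **a, char **b)
{
        int err = -1;

        *a = malloc(16);
        if (!*a)
                goto err0;
        *b = malloc(16);
        if (!*b)
                goto err1;
        return 0;

err1:
        free(*a);       /* undo step 1 only when step 2 failed */
err0:
        return err;
}

int main(void)
{
        char *a, *b;

        if (!setup_two(&a, &b)) {
                puts("both allocated");
                free(b);
                free(a);
        }
        return 0;
}
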
index 2b96effeadc1bc708a1f16e89342c8d39c4c4da7..c2945eb3397c8991ae05ea84abb2ba15591cbefb 100644 (file)
@@ -201,10 +201,18 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
        if (err < 0)
                return ERR_PTR(err);
 
+       if (desc.type != NFT_DATA_VALUE) {
+               err = -EINVAL;
+               goto err1;
+       }
+
        if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
                return &nft_cmp_fast_ops;
-       else
-               return &nft_cmp_ops;
+
+       return &nft_cmp_ops;
+err1:
+       nft_data_release(&data, desc.type);
+       return ERR_PTR(-EINVAL);
 }
 
 struct nft_expr_type nft_cmp_type __read_mostly = {
index a34ceb38fc55681962daad2c323f0011a5fe683d..1678e9e75e8ee7d22301d6083ddeb04dd39ab385 100644 (file)
@@ -826,9 +826,9 @@ static void nft_ct_helper_obj_destroy(struct nft_object *obj)
        struct nft_ct_helper_obj *priv = nft_obj_data(obj);
 
        if (priv->helper4)
-               module_put(priv->helper4->me);
+               nf_conntrack_helper_put(priv->helper4);
        if (priv->helper6)
-               module_put(priv->helper6->me);
+               nf_conntrack_helper_put(priv->helper6);
 }
 
 static void nft_ct_helper_obj_eval(struct nft_object *obj,
index 728baf88295aab3d4f0e1272d551672ae5a2fb13..4717d77969271c324087ed7677df636b414e54ad 100644 (file)
@@ -65,7 +65,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
        return 0;
 
 err1:
-       nft_data_uninit(&priv->data, desc.type);
+       nft_data_release(&priv->data, desc.type);
        return err;
 }
 
@@ -73,7 +73,8 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
                                  const struct nft_expr *expr)
 {
        const struct nft_immediate_expr *priv = nft_expr_priv(expr);
-       return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg));
+
+       return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
 }
 
 static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
index 9edc74eedc1021e836bc767defc02fac1a63333f..cedb96c3619fa991395602dff1363314d3de13ea 100644 (file)
@@ -102,9 +102,9 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
        priv->len = desc_from.len;
        return 0;
 err2:
-       nft_data_uninit(&priv->data_to, desc_to.type);
+       nft_data_release(&priv->data_to, desc_to.type);
 err1:
-       nft_data_uninit(&priv->data_from, desc_from.type);
+       nft_data_release(&priv->data_from, desc_from.type);
        return err;
 }
 
index 8ec086b6b56b742485e34511b38a77af848d9f99..3d3a6df4ce70ea0950a4f07cab75b3c54680e09a 100644 (file)
@@ -222,7 +222,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
        struct nft_set_elem elem;
        int err;
 
-       err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
+       err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC);
        iter->err = err;
        if (err)
                return;
index e97e2fb53f0a107b0361322be10f16b4ab4b5d32..fbdbaa00dd5fd751f6ab8f428de987017b5bdf79 100644 (file)
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                else if (d > 0)
                        p = &parent->rb_right;
                else {
-                       if (nft_set_elem_active(&rbe->ext, genmask)) {
-                               if (nft_rbtree_interval_end(rbe) &&
-                                   !nft_rbtree_interval_end(new))
-                                       p = &parent->rb_left;
-                               else if (!nft_rbtree_interval_end(rbe) &&
-                                        nft_rbtree_interval_end(new))
-                                       p = &parent->rb_right;
-                               else {
-                                       *ext = &rbe->ext;
-                                       return -EEXIST;
-                               }
+                       if (nft_rbtree_interval_end(rbe) &&
+                           !nft_rbtree_interval_end(new)) {
+                               p = &parent->rb_left;
+                       } else if (!nft_rbtree_interval_end(rbe) &&
+                                  nft_rbtree_interval_end(new)) {
+                               p = &parent->rb_right;
+                       } else if (nft_set_elem_active(&rbe->ext, genmask)) {
+                               *ext = &rbe->ext;
+                               return -EEXIST;
+                       } else {
+                               p = &parent->rb_left;
                        }
                }
        }
index 8876b7da6884c210393d1988032cbb5bd7018507..1770c1d9b37fc14be9d9d8cf4721d21c745826ad 100644 (file)
@@ -283,28 +283,30 @@ static int xt_obj_to_user(u16 __user *psize, u16 size,
                       &U->u.user.revision, K->u.kernel.TYPE->revision)
 
 int xt_data_to_user(void __user *dst, const void *src,
-                   int usersize, int size)
+                   int usersize, int size, int aligned_size)
 {
        usersize = usersize ? : size;
        if (copy_to_user(dst, src, usersize))
                return -EFAULT;
-       if (usersize != size && clear_user(dst + usersize, size - usersize))
+       if (usersize != aligned_size &&
+           clear_user(dst + usersize, aligned_size - usersize))
                return -EFAULT;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(xt_data_to_user);
 
-#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE)                            \
+#define XT_DATA_TO_USER(U, K, TYPE)                                    \
        xt_data_to_user(U->data, K->data,                               \
                        K->u.kernel.TYPE->usersize,                     \
-                       C_SIZE ? : K->u.kernel.TYPE->TYPE##size)
+                       K->u.kernel.TYPE->TYPE##size,                   \
+                       XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
 
 int xt_match_to_user(const struct xt_entry_match *m,
                     struct xt_entry_match __user *u)
 {
        return XT_OBJ_TO_USER(u, m, match, 0) ||
-              XT_DATA_TO_USER(u, m, match, 0);
+              XT_DATA_TO_USER(u, m, match);
 }
 EXPORT_SYMBOL_GPL(xt_match_to_user);
 
@@ -312,7 +314,7 @@ int xt_target_to_user(const struct xt_entry_target *t,
                      struct xt_entry_target __user *u)
 {
        return XT_OBJ_TO_USER(u, t, target, 0) ||
-              XT_DATA_TO_USER(u, t, target, 0);
+              XT_DATA_TO_USER(u, t, target);
 }
 EXPORT_SYMBOL_GPL(xt_target_to_user);
 
@@ -611,6 +613,12 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
 
+#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)                     \
+       xt_data_to_user(U->data, K->data,                               \
+                       K->u.kernel.TYPE->usersize,                     \
+                       C_SIZE,                                         \
+                       COMPAT_XT_ALIGN(C_SIZE))
+
 int xt_compat_match_to_user(const struct xt_entry_match *m,
                            void __user **dstptr, unsigned int *size)
 {
@@ -626,7 +634,7 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
                if (match->compat_to_user((void __user *)cm->data, m->data))
                        return -EFAULT;
        } else {
-               if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
+               if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
                        return -EFAULT;
        }
 
@@ -972,7 +980,7 @@ int xt_compat_target_to_user(const struct xt_entry_target *t,
                if (target->compat_to_user((void __user *)ct->data, t->data))
                        return -EFAULT;
        } else {
-               if (XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
+               if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
                        return -EFAULT;
        }
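
Match and target payloads are padded out to XT_ALIGN()ed sizes, and copying the padded size verbatim would leak uninitialized kernel memory to user space; xt_data_to_user() therefore copies only usersize bytes and zeroes the rest up to the aligned size. A userspace sketch of the copy-then-clear-tail rule, with memset() standing in for clear_user():

#include <stdio.h>
#include <string.h>

static int data_to_user(void *dst, const void *src,
                        size_t usersize, size_t aligned_size)
{
        memcpy(dst, src, usersize);                 /* the real payload */
        if (usersize != aligned_size)               /* zero the padding tail */
                memset((char *)dst + usersize, 0, aligned_size - usersize);
        return 0;
}

int main(void)
{
        char src[8] = "payload";
        char dst[16];

        memset(dst, 0xAA, sizeof(dst));             /* poison: stale bytes */
        data_to_user(dst, src, 8, 16);
        printf("tail byte = %#x\n", (unsigned char)dst[12]);  /* 0, not 0xaa */
        return 0;
}
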
 
index bb7ad82dcd5603e810db8fba35f81d3f2c03a2b7..623ef37de886fa22fd508a1261f0c1934f767e5f 100644 (file)
@@ -96,7 +96,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
 
        help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
        if (help == NULL) {
-               module_put(helper->me);
+               nf_conntrack_helper_put(helper);
                return -ENOMEM;
        }
 
@@ -263,7 +263,7 @@ out:
 err4:
        help = nfct_help(ct);
        if (help)
-               module_put(help->helper->me);
+               nf_conntrack_helper_put(help->helper);
 err3:
        nf_ct_tmpl_free(ct);
 err2:
@@ -346,7 +346,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
        if (ct) {
                help = nfct_help(ct);
                if (help)
-                       module_put(help->helper->me);
+                       nf_conntrack_helper_put(help->helper);
 
                nf_ct_netns_put(par->net, par->family);
 
index ee841f00a6ec715914fb14bb31dc8405f8dd4c4e..7586d446d7dcafc5c44b43190398840b68107d1f 100644 (file)
@@ -62,6 +62,7 @@
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
 #include <linux/genetlink.h>
+#include <linux/net_namespace.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
                goto out;
        }
        NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
-       NETLINK_CB(p->skb2).nsid_is_set = true;
+       if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
+               NETLINK_CB(p->skb2).nsid_is_set = true;
        val = netlink_broadcast_deliver(sk, p->skb2);
        if (val < 0) {
                netlink_overrun(sk);
index bf602e33c40af4896240c9cc0566fa10126cf662..08679ebb3068298a58a081926c6a7dd5a2a73d17 100644 (file)
@@ -1123,7 +1123,7 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
 
        help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL);
        if (!help) {
-               module_put(helper->me);
+               nf_conntrack_helper_put(helper);
                return -ENOMEM;
        }
 
@@ -1584,7 +1584,7 @@ void ovs_ct_free_action(const struct nlattr *a)
 static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
 {
        if (ct_info->helper)
-               module_put(ct_info->helper->me);
+               nf_conntrack_helper_put(ct_info->helper);
        if (ct_info->ct)
                nf_ct_tmpl_free(ct_info->ct);
 }
index dee469fed9671d518dbeddd6bd48d96cc9158675..51859b8edd7eff3845ca3d5b5b0d900583736d4a 100644 (file)
@@ -203,7 +203,6 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 
        *arg = (unsigned long) head;
        rcu_assign_pointer(tp->root, new);
-       call_rcu(&head->rcu, mall_destroy_rcu);
        return 0;
 
 err_replace_hw_filter:
index a9708da28eb53ff2987264c6c7d7ca6ec2ff09e9..95238284c422e23fbd834a3aedc54c9243743d0f 100644 (file)
@@ -1176,7 +1176,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
 
                asoc->ctsn_ack_point = asoc->next_tsn - 1;
                asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
-               if (!asoc->stream) {
+
+               if (sctp_state(asoc, COOKIE_WAIT)) {
+                       sctp_stream_free(asoc->stream);
                        asoc->stream = new->stream;
                        new->stream = NULL;
                }
index 0e06a278d2a911e2360e75e983b623e453284b7b..ba9ad32fc44740b9ec45d95e265d3d895148d7a7 100644 (file)
@@ -473,15 +473,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
                             struct sctp_association **app,
                             struct sctp_transport **tpp)
 {
+       struct sctp_init_chunk *chunkhdr, _chunkhdr;
        union sctp_addr saddr;
        union sctp_addr daddr;
        struct sctp_af *af;
        struct sock *sk = NULL;
        struct sctp_association *asoc;
        struct sctp_transport *transport = NULL;
-       struct sctp_init_chunk *chunkhdr;
        __u32 vtag = ntohl(sctphdr->vtag);
-       int len = skb->len - ((void *)sctphdr - (void *)skb->data);
 
        *app = NULL; *tpp = NULL;
 
@@ -516,13 +515,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
         * discard the packet.
         */
        if (vtag == 0) {
-               chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
-               if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
-                         + sizeof(__be32) ||
+               /* chunk header + first 4 octets of the init header */
+               chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+                                             sizeof(struct sctphdr),
+                                             sizeof(struct sctp_chunkhdr) +
+                                             sizeof(__be32), &_chunkhdr);
+               if (!chunkhdr ||
                    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
-                   ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
+                   ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
                        goto out;
-               }
+
        } else if (vtag != asoc->c.peer_vtag) {
                goto out;
        }
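
The rewrite above replaces hand-rolled pointer arithmetic and length math with skb_header_pointer(), whose contract is: return a pointer into the packet when the requested range is linear, copy into the caller's buffer when it is fragmented, and return NULL when the packet is too short. A userspace sketch of the short-packet half of that contract (the copy path for fragmented skbs is omitted):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const void *header_pointer(const uint8_t *data, size_t len,
                                  size_t offset, size_t hdrlen)
{
        if (offset + hdrlen > len)
                return NULL;            /* truncated: caller must bail out */
        return data + offset;
}

int main(void)
{
        uint8_t pkt[32] = {0};

        printf("ok:    %p\n", (void *)header_pointer(pkt, sizeof(pkt), 8, 8));
        printf("short: %p\n", (void *)header_pointer(pkt, sizeof(pkt), 30, 8));
        return 0;
}
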
index 8a08f13469c4cc26aeec55de6793b2c2e3c562a6..92e332e173914f16c10ba54038dbb78dee0d0c49 100644 (file)
@@ -2454,16 +2454,11 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
         * stream sequence number shall be set to 0.
         */
 
-       /* Allocate storage for the negotiated streams if it is not a temporary
-        * association.
-        */
-       if (!asoc->temp) {
-               if (sctp_stream_init(asoc, gfp))
-                       goto clean_up;
+       if (sctp_stream_init(asoc, gfp))
+               goto clean_up;
 
-               if (sctp_assoc_set_id(asoc, gfp))
-                       goto clean_up;
-       }
+       if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
+               goto clean_up;
 
        /* ADDIP Section 4.1 ASCONF Chunk Procedures
         *
index 4f5e6cfc7f601b4de8db8802668d553f1af8491d..f863b5573e42d6f1c4908bf045af0079edc4b2d9 100644 (file)
@@ -2088,6 +2088,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
                }
        }
 
+       /* Set temp so that it won't be added into hashtable */
+       new_asoc->temp = 1;
+
        /* Compare the tie_tag in cookie with the verification tag of
         * current association.
         */
index 24fedd4b117e8f61cf500f629f2b47f2bc53e76f..03f6b5840764dcc2c486015edd8dc7de4cd84b26 100644 (file)
@@ -119,11 +119,9 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 
        for (i = 0; i < (reqs << 1); i++) {
                rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
-               if (!rqst) {
-                       pr_err("RPC:       %s: Failed to create bc rpc_rqst\n",
-                              __func__);
+               if (!rqst)
                        goto out_free;
-               }
+
                dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);
 
                rqst->rq_xprt = &r_xprt->rx_xprt;
index 16aff8ddc16f8f3e66e31a86ce227b3ac49857bf..d5b54c020decdc2665d671f34d74dd809aa6682a 100644 (file)
@@ -2432,7 +2432,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
        case -ENETUNREACH:
        case -EADDRINUSE:
        case -ENOBUFS:
-               /* retry with existing socket, after a delay */
+               /*
+                * xs_tcp_force_close() wakes tasks with -EIO.
+                * We need to wake them first to ensure the
+                * correct error code.
+                */
+               xprt_wake_pending_tasks(xprt, status);
                xs_tcp_force_close(xprt);
                goto out;
        }
index 6f7f6757ceefb500551fafbf40c462835c4baf88..dfc8c51e4d74ec378a338ab9bb2560b3811f393b 100644 (file)
@@ -1540,8 +1540,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
        long timeout;
        int err;
        struct vsock_transport_send_notify_data send_data;
-
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        sk = sock->sk;
        vsk = vsock_sk(sk);
@@ -1584,11 +1583,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
        if (err < 0)
                goto out;
 
-
        while (total_written < len) {
                ssize_t written;
 
-               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+               add_wait_queue(sk_sleep(sk), &wait);
                while (vsock_stream_has_space(vsk) == 0 &&
                       sk->sk_err == 0 &&
                       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
@@ -1597,33 +1595,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
                        /* Don't wait for non-blocking sockets. */
                        if (timeout == 0) {
                                err = -EAGAIN;
-                               finish_wait(sk_sleep(sk), &wait);
+                               remove_wait_queue(sk_sleep(sk), &wait);
                                goto out_err;
                        }
 
                        err = transport->notify_send_pre_block(vsk, &send_data);
                        if (err < 0) {
-                               finish_wait(sk_sleep(sk), &wait);
+                               remove_wait_queue(sk_sleep(sk), &wait);
                                goto out_err;
                        }
 
                        release_sock(sk);
-                       timeout = schedule_timeout(timeout);
+                       timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
                        lock_sock(sk);
                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeout);
-                               finish_wait(sk_sleep(sk), &wait);
+                               remove_wait_queue(sk_sleep(sk), &wait);
                                goto out_err;
                        } else if (timeout == 0) {
                                err = -EAGAIN;
-                               finish_wait(sk_sleep(sk), &wait);
+                               remove_wait_queue(sk_sleep(sk), &wait);
                                goto out_err;
                        }
-
-                       prepare_to_wait(sk_sleep(sk), &wait,
-                                       TASK_INTERRUPTIBLE);
                }
-               finish_wait(sk_sleep(sk), &wait);
+               remove_wait_queue(sk_sleep(sk), &wait);
 
                /* These checks occur both as part of and after the loop
                 * conditional since we need to check before and after
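
The prepare_to_wait()/schedule_timeout() pattern replaced above can miss a wakeup that arrives between checking the condition and going to sleep; DEFINE_WAIT_FUNC() with woken_wake_function records the wakeup in the wait entry itself, so wait_woken() returns immediately if it already fired. A pthread sketch of the same remember-the-wakeup idea:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool woken;

static void *waker(void *arg)
{
        pthread_mutex_lock(&lock);
        woken = true;                   /* remember the wakeup ... */
        pthread_cond_signal(&cond);     /* ... then signal */
        pthread_mutex_unlock(&lock);
        return arg;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waker, NULL);
        pthread_mutex_lock(&lock);
        while (!woken)                  /* no lost wakeup: flag is rechecked */
                pthread_cond_wait(&cond, &lock);
        woken = false;                  /* consume it, like wait_woken() */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("woken");
        return 0;
}
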
index 14d5f0c8c45ff07224f2dc8e6310c2edc440874e..9f0901f3e42b60db2c6aee7927b2b0ddc0ab7759 100644 (file)
@@ -322,9 +322,9 @@ cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid)
 {
        struct cfg80211_sched_scan_request *pos;
 
-       ASSERT_RTNL();
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
 
-       list_for_each_entry(pos, &rdev->sched_scan_req_list, list) {
+       list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list) {
                if (pos->reqid == reqid)
                        return pos;
        }
@@ -398,13 +398,13 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid)
        trace_cfg80211_sched_scan_results(wiphy, reqid);
        /* ignore if we're not scanning */
 
-       rtnl_lock();
+       rcu_read_lock();
        request = cfg80211_find_sched_scan_req(rdev, reqid);
        if (request) {
                request->report_results = true;
                queue_work(cfg80211_wq, &rdev->sched_scan_res_wk);
        }
-       rtnl_unlock();
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
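
cfg80211_sched_scan_results() may run in contexts that cannot take the RTNL, so the request list is now walked under rcu_read_lock() with list_for_each_entry_rcu(); that is safe because writers publish fully initialized entries and retire them through the RCU list primitives. A minimal C11 publish/subscribe sketch of the ordering rule the readers rely on (toy single-publisher list, not an RCU implementation):

#include <stdatomic.h>
#include <stdio.h>

struct req { unsigned long long reqid; struct req *next; };

static _Atomic(struct req *) head;

static struct req *find_req(unsigned long long reqid)
{
        /* read side: one acquire load, then a plain lockless walk */
        for (struct req *r = atomic_load_explicit(&head, memory_order_acquire);
             r; r = r->next)
                if (r->reqid == reqid)
                        return r;
        return NULL;
}

int main(void)
{
        static struct req a = { .reqid = 7 };

        /* write side: initialize fully, then publish with release order */
        a.next = atomic_load(&head);
        atomic_store_explicit(&head, &a, memory_order_release);
        printf("found: %d\n", find_req(7) != NULL);
        return 0;
}
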
 
index 7198373e29206a0f19f006578596fea78e82f66e..4992f1025c9d3749bddd5f22948d2dad791aa9a9 100644 (file)
@@ -454,6 +454,8 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
        if (iftype == NL80211_IFTYPE_MESH_POINT)
                skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
 
+       mesh_flags &= MESH_FLAGS_AE;
+
        switch (hdr->frame_control &
                cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
        case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -469,9 +471,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
                             iftype != NL80211_IFTYPE_STATION))
                        return -1;
                if (iftype == NL80211_IFTYPE_MESH_POINT) {
-                       if (mesh_flags & MESH_FLAGS_AE_A4)
+                       if (mesh_flags == MESH_FLAGS_AE_A4)
                                return -1;
-                       if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
+                       if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
                                skb_copy_bits(skb, hdrlen +
                                        offsetof(struct ieee80211s_hdr, eaddr1),
                                        tmp.h_dest, 2 * ETH_ALEN);
@@ -487,9 +489,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
                     ether_addr_equal(tmp.h_source, addr)))
                        return -1;
                if (iftype == NL80211_IFTYPE_MESH_POINT) {
-                       if (mesh_flags & MESH_FLAGS_AE_A5_A6)
+                       if (mesh_flags == MESH_FLAGS_AE_A5_A6)
                                return -1;
-                       if (mesh_flags & MESH_FLAGS_AE_A4)
+                       if (mesh_flags == MESH_FLAGS_AE_A4)
                                skb_copy_bits(skb, hdrlen +
                                        offsetof(struct ieee80211s_hdr, eaddr1),
                                        tmp.h_source, ETH_ALEN);
index 8ec8a3fcf8d4740670b16c2c1af099ef0beaa5c5..574e6f32f94f29a496ef20f1a673a0e718a1a31b 100644 (file)
@@ -170,7 +170,7 @@ static int xfrm_dev_feat_change(struct net_device *dev)
 
 static int xfrm_dev_down(struct net_device *dev)
 {
-       if (dev->hw_features & NETIF_F_HW_ESP)
+       if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);
 
        xfrm_garbage_collect(dev_net(dev));
index b00a1d5a7f52e54f2ac454da3eeb14447f84d8b2..ed4e52d95172e2d8dcca603cdf62cfb55517f3c9 100644 (file)
@@ -1797,43 +1797,6 @@ free_dst:
        goto out;
 }
 
-#ifdef CONFIG_XFRM_SUB_POLICY
-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
-{
-       if (!*target) {
-               *target = kmalloc(size, GFP_ATOMIC);
-               if (!*target)
-                       return -ENOMEM;
-       }
-
-       memcpy(*target, src, size);
-       return 0;
-}
-#endif
-
-static int xfrm_dst_update_parent(struct dst_entry *dst,
-                                 const struct xfrm_selector *sel)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-       struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-       return xfrm_dst_alloc_copy((void **)&(xdst->partner),
-                                  sel, sizeof(*sel));
-#else
-       return 0;
-#endif
-}
-
-static int xfrm_dst_update_origin(struct dst_entry *dst,
-                                 const struct flowi *fl)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-       struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-       return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
-#else
-       return 0;
-#endif
-}
-
 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
                                struct xfrm_policy **pols,
                                int *num_pols, int *num_xfrms)
@@ -1905,16 +1868,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
        xdst = (struct xfrm_dst *)dst;
        xdst->num_xfrms = err;
-       if (num_pols > 1)
-               err = xfrm_dst_update_parent(dst, &pols[1]->selector);
-       else
-               err = xfrm_dst_update_origin(dst, fl);
-       if (unlikely(err)) {
-               dst_free(dst);
-               XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
-               return ERR_PTR(err);
-       }
-
        xdst->num_pols = num_pols;
        memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
        xdst->policy_genid = atomic_read(&pols[0]->genid);
index fc3c5aa387543a63bf18955e60309207bf06e961..2e291bc5f1fc1003ca62e40f67da8a6a46875c02 100644 (file)
@@ -1383,6 +1383,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
        x->curlft.add_time = orig->curlft.add_time;
        x->km.state = orig->km.state;
        x->km.seq = orig->km.seq;
+       x->replay = orig->replay;
+       x->preplay = orig->preplay;
 
        return x;
 
index f9b92ece78343a463a37d33e3f73fa8621b84444..5afd1098e33a173a18b2310aa24d205534df711c 100644 (file)
@@ -23,10 +23,11 @@ class LxDmesg(gdb.Command):
         super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
 
     def invoke(self, arg, from_tty):
-        log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
-        log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
-        log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
-        log_buf_len = int(gdb.parse_and_eval("log_buf_len"))
+        log_buf_addr = int(str(gdb.parse_and_eval(
+            "'printk.c'::log_buf")).split()[0], 16)
+        log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
+        log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
+        log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
 
         inf = gdb.inferiors()[0]
         start = log_buf_addr + log_first_idx
index 6ebd3e6a1fd12d3202067020b48446fd9bdcff98..5e3c673fa3f4435b027fbc634815be10c26cf39f 100644 (file)
@@ -27,6 +27,8 @@
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
 
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
 #define KVM_REG_SIZE(id)                                               \
        (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -114,6 +116,8 @@ struct kvm_debug_exit_arch {
 };
 
 struct kvm_sync_regs {
+       /* Used with KVM_CAP_ARM_USER_IRQ */
+       __u64 device_irq_level;
 };
 
 struct kvm_arch_memory_slot {
@@ -192,13 +196,17 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
 #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
 #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO  7
+#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS  8
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
                        (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
 #define VGIC_LEVEL_INFO_LINE_LEVEL     0
 
-#define   KVM_DEV_ARM_VGIC_CTRL_INIT    0
+#define   KVM_DEV_ARM_VGIC_CTRL_INIT           0
+#define   KVM_DEV_ARM_ITS_SAVE_TABLES          1
+#define   KVM_DEV_ARM_ITS_RESTORE_TABLES       2
+#define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT         24
index c2860358ae3e0c3271d9ca4b944351986276e397..70eea2ecc6631cab3d718c3958cd0513a7f7e6a3 100644 (file)
@@ -39,6 +39,8 @@
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
 
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
 #define KVM_REG_SIZE(id)                                               \
        (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -143,6 +145,8 @@ struct kvm_debug_exit_arch {
 #define KVM_GUESTDBG_USE_HW            (1 << 17)
 
 struct kvm_sync_regs {
+       /* Used with KVM_CAP_ARM_USER_IRQ */
+       __u64 device_irq_level;
 };
 
 struct kvm_arch_memory_slot {
@@ -212,13 +216,17 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
 #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
 #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO  7
+#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
                        (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
 #define VGIC_LEVEL_INFO_LINE_LEVEL     0
 
-#define   KVM_DEV_ARM_VGIC_CTRL_INIT   0
+#define   KVM_DEV_ARM_VGIC_CTRL_INIT           0
+#define   KVM_DEV_ARM_ITS_SAVE_TABLES           1
+#define   KVM_DEV_ARM_ITS_RESTORE_TABLES        2
+#define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
 
 /* Device Control API on vcpu fd */
 #define KVM_ARM_VCPU_PMU_V3_CTRL       0
index 4edbe4bb0e8b0cdd2553c74df9988823990833d9..07fbeb927834f3a96278414aedaa59ea580ae8de 100644 (file)
@@ -29,6 +29,9 @@
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_GUEST_DEBUG
 
+/* Not always available, but if it is, this is the correct offset.  */
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
 struct kvm_regs {
        __u64 pc;
        __u64 cr;
index 7f4fd65e9208514b99112c51f0e018f3f441712f..3dd2a1d308dd0b92c36c3c5ca5276fd19838f252 100644 (file)
@@ -26,6 +26,8 @@
 #define KVM_DEV_FLIC_ADAPTER_REGISTER  6
 #define KVM_DEV_FLIC_ADAPTER_MODIFY    7
 #define KVM_DEV_FLIC_CLEAR_IO_IRQ      8
+#define KVM_DEV_FLIC_AISM              9
+#define KVM_DEV_FLIC_AIRQ_INJECT       10
 /*
  * We can have up to 4*64k pending subchannels + 8 adapter interrupts,
  * as well as up  to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
@@ -41,7 +43,14 @@ struct kvm_s390_io_adapter {
        __u8 isc;
        __u8 maskable;
        __u8 swap;
-       __u8 pad;
+       __u8 flags;
+};
+
+#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01
+
+struct kvm_s390_ais_req {
+       __u8 isc;
+       __u16 mode;
 };
 
 #define KVM_S390_IO_ADAPTER_MASK 1
@@ -110,6 +119,7 @@ struct kvm_s390_vm_cpu_machine {
 #define KVM_S390_VM_CPU_FEAT_CMMA      10
 #define KVM_S390_VM_CPU_FEAT_PFMFI     11
 #define KVM_S390_VM_CPU_FEAT_SIGPIF    12
+#define KVM_S390_VM_CPU_FEAT_KSS       13
 struct kvm_s390_vm_cpu_feat {
        __u64 feat[16];
 };
@@ -198,6 +208,10 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_VRS    (1UL << 6)
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
+#define KVM_SYNC_GSCB   (1UL << 9)
+/* length and alignment of the sdnx as a power of two */
+#define SDNXC 8
+#define SDNXL (1UL << SDNXC)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
        __u64 prefix;   /* prefix register */
@@ -218,8 +232,16 @@ struct kvm_sync_regs {
        };
        __u8  reserved[512];    /* for future vector expansion */
        __u32 fpc;              /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-       __u8 padding[52];       /* riccb needs to be 64byte aligned */
+       __u8 padding1[52];      /* riccb needs to be 64byte aligned */
        __u8 riccb[64];         /* runtime instrumentation controls block */
+       __u8 padding2[192];     /* sdnx needs to be 256byte aligned */
+       union {
+               __u8 sdnx[SDNXL];  /* state description annex */
+               struct {
+                       __u64 reserved1[2];
+                       __u64 gscb[4];
+               };
+       };
 };
 
 #define KVM_REG_S390_TODPR     (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
index 0fe00446f9cac8c16e1ae3fcabc4a469b772597b..2701e5f8145bd250c3a35dc12cab5839e16ff30d 100644 (file)
 #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
+#define X86_FEATURE_MBA         ( 7*32+18) /* Memory Bandwidth Allocation */
+
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
index 85599ad4d0247863cef655d02b9a4b3f83c77fb7..5dff775af7cd6456f7177d9ce5888ae78dc6bc10 100644 (file)
 # define DISABLE_OSPKE         (1<<(X86_FEATURE_OSPKE & 31))
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
+#ifdef CONFIG_X86_5LEVEL
+# define DISABLE_LA57  0
+#else
+# define DISABLE_LA57  (1<<(X86_FEATURE_LA57 & 31))
+#endif
+
 /*
  * Make sure to add features to the correct mask
  */
@@ -55,7 +61,7 @@
 #define DISABLED_MASK13        0
 #define DISABLED_MASK14        0
 #define DISABLED_MASK15        0
-#define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
 #define DISABLED_MASK17        0
 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
index fac9a5c0abe94b233b72b35bca8c7a665847b694..d91ba04dd00709b7e549ced791759c12cf70d5d0 100644 (file)
 # define NEED_MOVBE    0
 #endif
 
+#ifdef CONFIG_X86_5LEVEL
+# define NEED_LA57     (1<<(X86_FEATURE_LA57 & 31))
+#else
+# define NEED_LA57     0
+#endif
+
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_PARAVIRT
 /* Paravirtualized systems may not have PSE or PGE available */
 #define REQUIRED_MASK13        0
 #define REQUIRED_MASK14        0
 #define REQUIRED_MASK15        0
-#define REQUIRED_MASK16        0
+#define REQUIRED_MASK16        (NEED_LA57)
 #define REQUIRED_MASK17        0
 #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
index 739c0c5940226d7af38d2ab4bc068f7772f1a8c7..c2824d02ba3762553b62a07709058102050e34da 100644 (file)
@@ -9,6 +9,9 @@
 #include <linux/types.h>
 #include <linux/ioctl.h>
 
+#define KVM_PIO_PAGE_OFFSET 1
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+
 #define DE_VECTOR 0
 #define DB_VECTOR 1
 #define BP_VECTOR 3
index 14458658e988bb6c9333e73701ea55e7b6525a92..690a2dcf407860cf54793f03e69d8761b06f068d 100644 (file)
 #define EXIT_REASON_WBINVD              54
 #define EXIT_REASON_XSETBV              55
 #define EXIT_REASON_APIC_WRITE          56
+#define EXIT_REASON_RDRAND              57
 #define EXIT_REASON_INVPCID             58
+#define EXIT_REASON_VMFUNC              59
+#define EXIT_REASON_ENCLS               60
+#define EXIT_REASON_RDSEED              61
 #define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
@@ -90,6 +94,7 @@
        { EXIT_REASON_TASK_SWITCH,           "TASK_SWITCH" }, \
        { EXIT_REASON_CPUID,                 "CPUID" }, \
        { EXIT_REASON_HLT,                   "HLT" }, \
+       { EXIT_REASON_INVD,                  "INVD" }, \
        { EXIT_REASON_INVLPG,                "INVLPG" }, \
        { EXIT_REASON_RDPMC,                 "RDPMC" }, \
        { EXIT_REASON_RDTSC,                 "RDTSC" }, \
        { EXIT_REASON_IO_INSTRUCTION,        "IO_INSTRUCTION" }, \
        { EXIT_REASON_MSR_READ,              "MSR_READ" }, \
        { EXIT_REASON_MSR_WRITE,             "MSR_WRITE" }, \
+       { EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
+       { EXIT_REASON_MSR_LOAD_FAIL,         "MSR_LOAD_FAIL" }, \
        { EXIT_REASON_MWAIT_INSTRUCTION,     "MWAIT_INSTRUCTION" }, \
        { EXIT_REASON_MONITOR_TRAP_FLAG,     "MONITOR_TRAP_FLAG" }, \
        { EXIT_REASON_MONITOR_INSTRUCTION,   "MONITOR_INSTRUCTION" }, \
        { EXIT_REASON_MCE_DURING_VMENTRY,    "MCE_DURING_VMENTRY" }, \
        { EXIT_REASON_TPR_BELOW_THRESHOLD,   "TPR_BELOW_THRESHOLD" }, \
        { EXIT_REASON_APIC_ACCESS,           "APIC_ACCESS" }, \
-       { EXIT_REASON_GDTR_IDTR,             "GDTR_IDTR" }, \
-       { EXIT_REASON_LDTR_TR,               "LDTR_TR" }, \
+       { EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
+       { EXIT_REASON_GDTR_IDTR,             "GDTR_IDTR" }, \
+       { EXIT_REASON_LDTR_TR,               "LDTR_TR" }, \
        { EXIT_REASON_EPT_VIOLATION,         "EPT_VIOLATION" }, \
        { EXIT_REASON_EPT_MISCONFIG,         "EPT_MISCONFIG" }, \
        { EXIT_REASON_INVEPT,                "INVEPT" }, \
+       { EXIT_REASON_RDTSCP,                "RDTSCP" }, \
        { EXIT_REASON_PREEMPTION_TIMER,      "PREEMPTION_TIMER" }, \
+       { EXIT_REASON_INVVPID,               "INVVPID" }, \
        { EXIT_REASON_WBINVD,                "WBINVD" }, \
+       { EXIT_REASON_XSETBV,                "XSETBV" }, \
        { EXIT_REASON_APIC_WRITE,            "APIC_WRITE" }, \
-       { EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
-       { EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
-       { EXIT_REASON_MSR_LOAD_FAIL,         "MSR_LOAD_FAIL" }, \
-       { EXIT_REASON_INVD,                  "INVD" }, \
-       { EXIT_REASON_INVVPID,               "INVVPID" }, \
+       { EXIT_REASON_RDRAND,                "RDRAND" }, \
        { EXIT_REASON_INVPCID,               "INVPCID" }, \
+       { EXIT_REASON_VMFUNC,                "VMFUNC" }, \
+       { EXIT_REASON_ENCLS,                 "ENCLS" }, \
+       { EXIT_REASON_RDSEED,                "RDSEED" }, \
+       { EXIT_REASON_PML_FULL,              "PML_FULL" }, \
        { EXIT_REASON_XSAVES,                "XSAVES" }, \
        { EXIT_REASON_XRSTORS,               "XRSTORS" }
 
index 390d7c9685fd6107c83be2296ead9cb198b571a3..4ce25d43e8e305df8afcfc6fb1747a58fad20c1b 100644 (file)
                .off   = OFF,                                   \
                .imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)                                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_JA,                      \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)                                    \
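
BPF_JMP_A() adds the one jump form this macro set was missing: an unconditional `goto pc + off16` with no registers or immediate operand. A hypothetical kernel-internal use, assuming the companion macros BPF_MOV64_IMM() and BPF_EXIT_INSN() from the same linux/filter.h (the program itself is illustrative, not from this patch):

/* Skip the next instruction unconditionally; r0 ends up 0. */
struct bpf_insn prog[] = {
        BPF_JMP_A(1),                   /* goto pc + 1 */
        BPF_MOV64_IMM(BPF_REG_0, 1),    /* skipped */
        BPF_MOV64_IMM(BPF_REG_0, 0),    /* r0 = 0 */
        BPF_EXIT_INSN(),                /* return r0 */
};
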
index d538897b8e08bb4358c56148c189fc7b8128a9da..17b10304c393355da9ed2da743107a5c59748290 100644 (file)
  * tv_sec holds the number of seconds before (negative) or after (positive)
  * 00:00:00 1st January 1970 UTC.
  *
- * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is
- * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time.
- *
- * Note that if both tv_sec and tv_nsec are non-zero, then the two values must
- * either be both positive or both negative.
+ * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time.
  *
  * __reserved is held in case we need a yet finer resolution.
  */
 struct statx_timestamp {
        __s64   tv_sec;
-       __s32   tv_nsec;
+       __u32   tv_nsec;
        __s32   __reserved;
 };
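
With tv_nsec unsigned, every statx timestamp has a single canonical encoding: tv_sec may be negative, but tv_nsec is always a 0..999,999,999 offset after it, so half a second before the epoch is { -1, 500000000 } rather than { 0, -500000000 }. A small sketch of decoding that form (toy struct mirroring the layout above):

#include <stdint.h>
#include <stdio.h>

struct ts { int64_t tv_sec; uint32_t tv_nsec; };

static double to_seconds(struct ts t)
{
        /* nanoseconds always count forward from tv_sec */
        return (double)t.tv_sec + t.tv_nsec / 1e9;
}

int main(void)
{
        struct ts half_before_epoch = { .tv_sec = -1, .tv_nsec = 500000000 };

        printf("%.1f\n", to_seconds(half_before_epoch));  /* -0.5 */
        return 0;
}
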
 
index cb0eda3925e6d8562da9d3091147c5fca602ebbf..3517e204a2b30754e430213c41b1d48e51d9b52c 100644 (file)
@@ -311,6 +311,10 @@ include::itrace.txt[]
        Set the maximum number of program blocks to print with brstackasm for
        each sample.
 
+--inline::
+       If a callgraph address belongs to an inlined function, the inline stack
+       will be printed. Each entry has the function name and file/line.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script-perl[1],
index d05aec491cff22189d7fe763120884a639cd2cfb..4761b0d7fcb5b2586e7fd86c1ed8b219d0cec987 100644 (file)
@@ -2494,6 +2494,8 @@ int cmd_script(int argc, const char **argv)
                        "Enable kernel symbol demangling"),
        OPT_STRING(0, "time", &script.time_str, "str",
                   "Time span of interest (start,stop)"),
+       OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
+                   "Show inline function"),
        OPT_END()
        };
        const char * const script_subcommands[] = { "record", "report", NULL };
index 59addd52d9cd51ab2d8789989a9d377d2ce23eb5..ddb2c6fbdf919e8124ffb40eee47c801b43bd6c6 100644 (file)
@@ -210,6 +210,8 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
                        return 0;
 
                ret = b->callchain->max_depth - a->callchain->max_depth;
+               if (callchain_param.order == ORDER_CALLER)
+                       ret = -ret;
        }
        return ret;
 }
index 81fc29ac798facf789712d81147eacda0c01552d..b4204b43ed58a83c9530ae3b75df4c01004b442e 100644 (file)
@@ -621,14 +621,19 @@ enum match_result {
 static enum match_result match_chain_srcline(struct callchain_cursor_node *node,
                                             struct callchain_list *cnode)
 {
-       char *left = get_srcline(cnode->ms.map->dso,
+       char *left = NULL;
+       char *right = NULL;
+       enum match_result ret = MATCH_EQ;
+       int cmp;
+
+       if (cnode->ms.map)
+               left = get_srcline(cnode->ms.map->dso,
                                 map__rip_2objdump(cnode->ms.map, cnode->ip),
                                 cnode->ms.sym, true, false);
-       char *right = get_srcline(node->map->dso,
+       if (node->map)
+               right = get_srcline(node->map->dso,
                                  map__rip_2objdump(node->map, node->ip),
                                  node->sym, true, false);
-       enum match_result ret = MATCH_EQ;
-       int cmp;
 
        if (left && right)
                cmp = strcmp(left, right);
index e415aee6a24520f3c88e9ce70d621b39109896b6..583f3a602506f29f198d7153b8ad8ee8073a6fd9 100644 (file)
@@ -7,6 +7,7 @@
 #include "map.h"
 #include "strlist.h"
 #include "symbol.h"
+#include "srcline.h"
 
 static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
 {
@@ -168,6 +169,38 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
                        if (!print_oneline)
                                printed += fprintf(fp, "\n");
 
+                       if (symbol_conf.inline_name && node->map) {
+                               struct inline_node *inode;
+
+                               addr = map__rip_2objdump(node->map, node->ip);
+                               inode = dso__parse_addr_inlines(node->map->dso, addr);
+
+                               if (inode) {
+                                       struct inline_list *ilist;
+
+                                       list_for_each_entry(ilist, &inode->val, list) {
+                                               if (print_arrow)
+                                                       printed += fprintf(fp, " <-");
+
+                                               /* IP is same, just skip it */
+                                               if (print_ip)
+                                                       printed += fprintf(fp, "%c%16s",
+                                                                          s, "");
+                                               if (print_sym)
+                                                       printed += fprintf(fp, " %s",
+                                                                          ilist->funcname);
+                                               if (print_srcline)
+                                                       printed += fprintf(fp, "\n  %s:%d",
+                                                                          ilist->filename,
+                                                                          ilist->line_nr);
+                                               if (!print_oneline)
+                                                       printed += fprintf(fp, "\n");
+                                       }
+
+                                       inline_node__delete(inode);
+                               }
+                       }
+
                        if (symbol_conf.bt_stop_list &&
                            node->sym &&
                            strlist__has_entry(symbol_conf.bt_stop_list,
index df051a52393c1de8e85f46d43f567755888133e7..ebc88a74e67b7129cbbd6790c34f50dcefec0de1 100644 (file)
@@ -56,7 +56,10 @@ static int inline_list__append(char *filename, char *funcname, int line_nr,
                }
        }
 
-       list_add_tail(&ilist->list, &node->val);
+       if (callchain_param.order == ORDER_CALLEE)
+               list_add_tail(&ilist->list, &node->val);
+       else
+               list_add(&ilist->list, &node->val);
 
        return 0;
 }
@@ -200,12 +203,14 @@ static void addr2line_cleanup(struct a2l_data *a2l)
 
 #define MAX_INLINE_NEST 1024
 
-static void inline_list__reverse(struct inline_node *node)
+static int inline_list__append_dso_a2l(struct dso *dso,
+                                      struct inline_node *node)
 {
-       struct inline_list *ilist, *n;
+       struct a2l_data *a2l = dso->a2l;
+       char *funcname = a2l->funcname ? strdup(a2l->funcname) : NULL;
+       char *filename = a2l->filename ? strdup(a2l->filename) : NULL;
 
-       list_for_each_entry_safe_reverse(ilist, n, &node->val, list)
-               list_move_tail(&ilist->list, &node->val);
+       return inline_list__append(filename, funcname, a2l->line, node, dso);
 }
 
 static int addr2line(const char *dso_name, u64 addr,
@@ -230,36 +235,36 @@ static int addr2line(const char *dso_name, u64 addr,
 
        bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
 
-       if (a2l->found && unwind_inlines) {
+       if (!a2l->found)
+               return 0;
+
+       if (unwind_inlines) {
                int cnt = 0;
 
+               if (node && inline_list__append_dso_a2l(dso, node))
+                       return 0;
+
                while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
                                             &a2l->funcname, &a2l->line) &&
                       cnt++ < MAX_INLINE_NEST) {
 
                        if (node != NULL) {
-                               if (inline_list__append(strdup(a2l->filename),
-                                                       strdup(a2l->funcname),
-                                                       a2l->line, node,
-                                                       dso) != 0)
+                               if (inline_list__append_dso_a2l(dso, node))
                                        return 0;
+                               /* found at least one inline frame */
+                               ret = 1;
                        }
                }
+       }
 
-               if ((node != NULL) &&
-                   (callchain_param.order != ORDER_CALLEE)) {
-                       inline_list__reverse(node);
-               }
+       if (file) {
+               *file = a2l->filename ? strdup(a2l->filename) : NULL;
+               ret = *file ? 1 : 0;
        }
 
-       if (a2l->found && a2l->filename) {
-               *file = strdup(a2l->filename);
+       if (line)
                *line = a2l->line;
 
-               if (*file)
-                       ret = 1;
-       }
-
        return ret;
 }
 
@@ -278,8 +283,6 @@ void dso__free_a2l(struct dso *dso)
 static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
        struct dso *dso)
 {
-       char *file = NULL;
-       unsigned int line = 0;
        struct inline_node *node;
 
        node = zalloc(sizeof(*node));
@@ -291,7 +294,7 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
        INIT_LIST_HEAD(&node->val);
        node->addr = addr;
 
-       if (!addr2line(dso_name, addr, &file, &line, dso, TRUE, node))
+       if (!addr2line(dso_name, addr, NULL, NULL, dso, TRUE, node))
                goto out_free_inline_node;
 
        if (list_empty(&node->val))
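Taken together, the srcline.c hunks restructure addr2line(): an early return replaces the nested !a2l->found checks, inline entries are stored in the requested callchain order at append time, and addr2inlines() now passes NULL for the file/line out-parameters whose values it always discarded. A rough sketch of why per-order insertion makes the old inline_list__reverse() pass unnecessary, assuming bfd_find_inliner_info() reports inline frames innermost-out:

	/* Discovery order: C (innermost), then B, then A (outermost).        */
	/* ORDER_CALLEE -> list_add_tail() per entry -> list C, B, A (callee first) */
	/* other orders -> list_add() per entry      -> list A, B, C (caller first) */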
index f90e11a555b208302f9dd2a4163d73051e0d2f47..943a06291587b064c3649569dd85b356805a7fab 100644 (file)
@@ -168,12 +168,16 @@ frame_callback(Dwfl_Frame *state, void *arg)
 {
        struct unwind_info *ui = arg;
        Dwarf_Addr pc;
+       bool isactivation;
 
-       if (!dwfl_frame_pc(state, &pc, NULL)) {
+       if (!dwfl_frame_pc(state, &pc, &isactivation)) {
                pr_err("%s", dwfl_errmsg(-1));
                return DWARF_CB_ABORT;
        }
 
+       if (!isactivation)
+               --pc;
+
        return entry(pc, ui) || !(--ui->max_stack) ?
               DWARF_CB_ABORT : DWARF_CB_OK;
 }
index f8455bed6e653705183b878c40d294991edf4e92..672c2ada9357a25054b4b1ba7eeec643505984e2 100644 (file)
@@ -692,6 +692,17 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 
                while (!ret && (unw_step(&c) > 0) && i < max_stack) {
                        unw_get_reg(&c, UNW_REG_IP, &ips[i]);
+
+                       /*
+                        * Decrement the IP for any non-activation frames.
+                        * This is required to properly find the srcline
+                        * for caller frames.
+                        * See also the documentation for dwfl_frame_pc(),
+                        * which this code tries to replicate.
+                        */
+                       if (unw_is_signal_frame(&c) <= 0)
+                               --ips[i];
+
                        ++i;
                }
 
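Both unwinder hunks above implement the same rule: for a non-activation (caller) frame, the sampled PC is a return address and points at the instruction after the call, so it must be stepped back before source-line lookup or the frame gets attributed to the wrong line. A minimal sketch of the rule, with srcline_for() standing in as a hypothetical lookup helper (not a perf API):

	/* srcline_for() is hypothetical; the decrement is the point. */
	u64 lookup = is_activation ? pc : pc - 1; /* step back into the call site */
	line = srcline_for(dso, lookup);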
diff --git a/tools/power/acpi/.gitignore b/tools/power/acpi/.gitignore
new file mode 100644 (file)
index 0000000..cba3d99
--- /dev/null
@@ -0,0 +1,4 @@
+acpidbg
+acpidump
+ec
+include
index 3773562056da267aee91878ed8088b3c577a997e..cabb19b1e3718b289910fcc921c698f44f162768 100644 (file)
@@ -49,6 +49,7 @@
 #define MAX_NR_MAPS    4
 
 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS     (1 << 0)
+#define F_LOAD_WITH_STRICT_ALIGNMENT           (1 << 1)
 
 struct bpf_test {
        const char *descr;
@@ -2614,6 +2615,30 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "direct packet access: test17 (pruning, alignment)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+                       BPF_JMP_A(-6),
+               },
+               .errstr = "misaligned packet access off 2+15+-4 size 4",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+       },
        {
                "helper access to packet: test1, valid packet_ptr range",
                .insns = {
@@ -3340,6 +3365,70 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
+       {
+               "alu ops on ptr_to_map_value_or_null, 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "R4 invalid mem access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+       },
+       {
+               "alu ops on ptr_to_map_value_or_null, 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "R4 invalid mem access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+       },
+       {
+               "alu ops on ptr_to_map_value_or_null, 3",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "R4 invalid mem access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+       },
        {
                "invalid memory access with multiple map_lookup_elem calls",
                .insns = {
@@ -4937,7 +5026,149 @@ static struct bpf_test tests[] = {
                .fixup_map_in_map = { 3 },
                .errstr = "R1 type=map_value_or_null expected=map_ptr",
                .result = REJECT,
-       }
+       },
+       {
+               "ld_abs: check calling conv, r1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_1, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R2 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R3 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r4",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R4 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r5",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_5, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R5 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r7",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_7, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "ld_ind: check calling conv, r1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R2 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_3, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R3 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r4",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_4, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R4 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r5",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_5, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R5 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r7",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_7, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -5059,9 +5290,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 
        do_test_fixup(test, prog, map_fds);
 
-       fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
-                                  prog, prog_len, "GPL", 0, bpf_vlog,
-                                  sizeof(bpf_vlog));
+       fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
+                                    prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
+                                    "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
 
        expected_ret = unpriv && test->result_unpriv != UNDEF ?
                       test->result_unpriv : test->result;
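With this change every test program is loaded through bpf_verify_program(), whose extra argument (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT here) requests the verifier's strict-alignment mode, so flagged cases such as "direct packet access: test17 (pruning, alignment)" are checked even on architectures with efficient unaligned access. A sketch of what the helper is assumed to do with that argument:

	/* Assumption: the helper maps its int flag onto the kernel's prog_flags bit. */
	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;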
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
new file mode 100644 (file)
index 0000000..f4d1ff7
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/sh
+# description: Register/unregister many kprobe events
+
+# The ftrace fentry skip size depends on the machine architecture.
+# Currently HAVE_KPROBES_ON_FTRACE is defined on x86 and powerpc.
+case `uname -m` in
+  x86_64|i[3456]86) OFFS=5;;
+  ppc*) OFFS=4;;
+  *) OFFS=0;;
+esac
+
+echo "Registering up to 256 kprobes"
+grep t /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \
+head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||:
+
+echo 1 > events/kprobes/enable
+echo 0 > events/kprobes/enable
+echo > kprobe_events
+echo "Waiting for unoptimization and freeing"
+sleep 5
+echo "Done"
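Each line the loop feeds into kprobe_events has the form p <symbol>+<OFFS>, so on x86_64 a generated probe definition looks like this (symbol chosen arbitrarily for illustration):

	p vfs_read+5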
index d9c49f41515e704994ed1669dbe0a859a6dadd79..e79ccd6aada1a9acf10bb2860e76ddb444415d52 100644 (file)
@@ -42,12 +42,12 @@ int test_body(void)
        printf("Check DSCR TM context switch: ");
        fflush(stdout);
        for (;;) {
-               rv = 1;
                asm __volatile__ (
                        /* set a known value into the DSCR */
                        "ld      3, %[dscr1];"
                        "mtspr   %[sprn_dscr], 3;"
 
+                       "li      %[rv], 1;"
                        /* start and suspend a transaction */
                        "tbegin.;"
                        "beq     1f;"
index c0c48507e44e2992b3624b2374cd08769a8bb607..ad0543e21760562d94bc0a44676700b1179c1787 100644 (file)
@@ -220,6 +220,7 @@ config INITRAMFS_COMPRESSION_LZ4
 endchoice
 
 config INITRAMFS_COMPRESSION
+       depends on INITRAMFS_SOURCE!=""
        string
        default ""      if INITRAMFS_COMPRESSION_NONE
        default ".gz"   if INITRAMFS_COMPRESSION_GZIP