git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 10 Jun 2017 05:28:33 +0000 (22:28 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 10 Jun 2017 05:28:33 +0000 (22:28 -0700)
Pull input fixes from Dmitry Torokhov:

 - mark "guest" RMI device as pass-through port to avoid "phantom" ALPS
   touchpad on newer Lenovo Carbons

 - add two more laptops to the Elantech's lists of devices using CRC
   mode

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input:
  Input: synaptics-rmi4 - register F03 port as pass-through serio
  Input: elantech - add Fujitsu Lifebook E546/E557 to force crc_enabled

480 files changed:
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/net/dsa/marvell.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
Documentation/networking/dpaa.txt [new file with mode: 0644]
Documentation/networking/tcp.txt
MAINTAINERS
Makefile
arch/arm/boot/compressed/efi-header.S
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/imx6ul-14x14-evk.dts
arch/arm/boot/dts/keystone-k2l-netcp.dtsi
arch/arm/boot/dts/keystone-k2l.dtsi
arch/arm/boot/dts/versatile-pb.dts
arch/arm/common/mcpm_entry.c
arch/arm/include/asm/pgtable-nommu.h
arch/arm/mach-at91/Kconfig
arch/arm/mach-davinci/pm.c
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/acpi.h
arch/arm64/kernel/pci.c
arch/frv/include/asm/timex.h
arch/mips/kernel/process.c
arch/openrisc/kernel/process.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/64/hash-4k.h
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/topology.h
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/perf/power9-pmu.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/powernv/subcore.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/sysdev/simple_gpio.c
arch/sparc/Kconfig
arch/sparc/include/asm/mmu_64.h
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/include/asm/pil.h
arch/sparc/include/asm/vio.h
arch/sparc/kernel/ds.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/kernel.h
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/tsb.S
arch/sparc/kernel/ttable_64.S
arch/sparc/kernel/vio.c
arch/sparc/lib/Makefile
arch/sparc/lib/multi3.S [new file with mode: 0644]
arch/sparc/mm/init_64.c
arch/sparc/mm/tsb.c
arch/sparc/mm/ultra.S
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/process_32.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/pat.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
block/bfq-cgroup.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bio-integrity.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk.h
block/cfq-iosched.c
crypto/asymmetric_keys/public_key.c
crypto/drbg.c
crypto/gcm.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/battery.c
drivers/acpi/button.c
drivers/acpi/device_pm.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/ata/ahci.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/sata_mv.c
drivers/ata/sata_rcar.c
drivers/base/power/main.c
drivers/base/power/wakeup.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/rbd.c
drivers/char/pcmcia/cm4040_cs.c
drivers/char/random.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/dma/ep93xx_dma.c
drivers/dma/mv_xor_v2.c
drivers/dma/pl330.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/sh/usb-dmac.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/efi-bgrt.c
drivers/firmware/efi/libstub/secureboot.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uc.h
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fence.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/hid/Kconfig
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-elecom.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/wacom_wac.c
drivers/hwmon/Kconfig
drivers/hwmon/aspeed-pwm-tacho.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/netlink.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/uverbs_marshall.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/chip_registers.h
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/intr.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/sysfs.c
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_osdep.h
drivers/infiniband/hw/i40iw/i40iw_type.h
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/mISDN/stack.c
drivers/md/bitmap.c
drivers/md/dm-bufio.c
drivers/md/dm-integrity.c
drivers/md/dm-ioctl.c
drivers/md/dm-raid1.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/media/Kconfig
drivers/media/Makefile
drivers/media/cec/Kconfig
drivers/media/cec/Makefile
drivers/media/cec/cec-adap.c
drivers/media/cec/cec-core.c
drivers/media/i2c/Kconfig
drivers/media/platform/Kconfig
drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
drivers/media/platform/vivid/Kconfig
drivers/media/rc/rc-ir-raw.c
drivers/media/usb/pulse8-cec/Kconfig
drivers/media/usb/rainshadow-cec/Kconfig
drivers/media/usb/rainshadow-cec/rainshadow-cec.c
drivers/memory/atmel-ebi.c
drivers/misc/cxl/file.c
drivers/misc/cxl/native.c
drivers/misc/sgi-xp/xp.h
drivers/misc/sgi-xp/xp_main.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nand_samsung.c
drivers/mtd/nand/tango_nand.c
drivers/net/dsa/mv88e6xxx/global2.h
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qualcomm/emac/emac-mac.c
drivers/net/ethernet/qualcomm/emac/emac-phy.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/geneve.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/perf/arm_pmu_acpi.c
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-mxs.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinconf-generic.c
drivers/pinctrl/pinmux.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
drivers/reset/hisilicon/hi6220_reset.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/qedi/qedi.h
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qedi/qedi_main.c
drivers/staging/media/atomisp/i2c/Makefile
drivers/staging/media/atomisp/i2c/imx/Makefile
drivers/staging/media/atomisp/i2c/ov5693/Makefile
drivers/staging/media/atomisp/pci/atomisp2/Makefile
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl0.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/tty/tty_port.c
drivers/xen/privcmd.c
fs/dax.c
fs/gfs2/log.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/internal.h
fs/nfs/namespace.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4client.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/super.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfsxdr.c
fs/ntfs/namei.c
fs/ocfs2/export.c
fs/overlayfs/Kconfig
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/proc/base.c
fs/reiserfs/journal.c
fs/ufs/super.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
include/drm/drm_dp_helper.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compiler-clang.h
include/linux/elevator.h
include/linux/gfp.h
include/linux/gpio/machine.h
include/linux/jiffies.h
include/linux/memblock.h
include/linux/mlx4/qp.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/pinctrl/pinconf-generic.h
include/linux/sunrpc/svc.h
include/linux/suspend.h
include/media/cec-notifier.h
include/media/cec.h
include/net/ipv6.h
include/net/tcp.h
include/rdma/ib_sa.h
include/rdma/rdma_netlink.h
include/target/iscsi/iscsi_target_core.h
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/livepatch/Kconfig
kernel/power/process.c
kernel/power/suspend.c
kernel/printk/printk.c
mm/gup.c
mm/hugetlb.c
mm/ksm.c
mm/memblock.c
mm/memory-failure.c
mm/memory.c
mm/mlock.c
mm/page_alloc.c
mm/slub.c
mm/util.c
net/bridge/br_netlink.c
net/bridge/br_stp_if.c
net/core/devlink.c
net/core/skbuff.c
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/legacy.c
net/ipv4/af_inet.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv6/calipso.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_tunnel.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/xfrm6_mode_ro.c
net/ipv6/xfrm6_mode_transport.c
net/mac80211/agg-tx.c
net/mac80211/ht.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mpls/af_mpls.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_nat_core.c
net/netfilter/nft_set_rbtree.c
net/netlink/af_netlink.c
net/sunrpc/xprtrdma/backchannel.c
net/sunrpc/xprtsock.c
scripts/gdb/linux/dmesg.py
sound/core/timer.c
sound/pci/hda/patch_realtek.c
sound/soc/atmel/atmel-classd.c
sound/soc/codecs/da7213.c
sound/soc/codecs/rt286.c
sound/soc/generic/simple-card.c
sound/soc/intel/skylake/skl-sst-ipc.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/intel/skylake/skl.c
sound/soc/intel/skylake/skl.h
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/cmd.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/gen.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/src.c
sound/soc/sh/rcar/ssi.c
sound/soc/sh/rcar/ssiu.c
sound/soc/soc-core.c
sound/usb/mixer_us16x08.c
usr/Kconfig

index 15f79c27748df1611b1643b77ca68e2a5e7cfaab..0f5c3b4347c6f94a82385f193e76bc99dba19db5 100644 (file)
 
        dscc4.setup=    [NET]
 
+       dt_cpu_ftrs=    [PPC]
+                       Format: {"off" | "known"}
+                       Control how the dt_cpu_ftrs device-tree binding is
+                       used for CPU feature discovery and setup (if it
+                       exists).
+                       off: Do not use it, fall back to legacy cpu table.
+                       known: Do not pass through unknown features to guests
+                       or userspace, only those that the kernel is aware of.
+
        dump_apple_properties   [X86]
                        Dump name and content of EFI device properties on
                        x86 Macs.  Useful for driver authors to determine
index 7ef9dbb08957a593528a4d873d1f3af29892f5c2..1d4d0f49c9d06eb66d9957fb0661cec35ddc7af9 100644 (file)
@@ -26,6 +26,10 @@ Optional properties:
 - interrupt-controller : Indicates the switch is itself an interrupt
                          controller. This is used for the PHY interrupts.
 #interrupt-cells = <2> : Controller uses two cells, number and flag
+- eeprom-length                : Set to the length of an EEPROM connected to the
+                         switch. Must be set if the switch can not detect
+                         the presence and/or size of a connected EEPROM,
+                         otherwise optional.
 - mdio                 : Container of PHY and devices on the switches MDIO
                          bus.
 - mdio?                : Container of PHYs and devices on the external MDIO
index 71a3c134af1b25d58613699aa465d17cff2c23d0..f01d154090dab1b3a8a58fee6a599459392aa1f8 100644 (file)
@@ -247,7 +247,6 @@ bias-bus-hold               - latch weakly
 bias-pull-up           - pull up the pin
 bias-pull-down         - pull down the pin
 bias-pull-pin-default  - use pin-default pull state
-bi-directional         - pin supports simultaneous input/output operations
 drive-push-pull                - drive actively high and low
 drive-open-drain       - drive with open drain
 drive-open-source      - drive with open source
@@ -260,7 +259,6 @@ input-debounce              - debounce mode with debound time X
 power-source           - select between different power supplies
 low-power-enable       - enable low power mode
 low-power-disable      - disable low power mode
-output-enable          - enable output on pin regardless of output value
 output-low             - set the pin to output mode with low level
 output-high            - set the pin to output mode with high level
 slew-rate              - set the slew rate
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
new file mode 100644 (file)
index 0000000..76e016d
--- /dev/null
@@ -0,0 +1,194 @@
+The QorIQ DPAA Ethernet Driver
+==============================
+
+Authors:
+Madalin Bucur <madalin.bucur@nxp.com>
+Camelia Groza <camelia.groza@nxp.com>
+
+Contents
+========
+
+       - DPAA Ethernet Overview
+       - DPAA Ethernet Supported SoCs
+       - Configuring DPAA Ethernet in your kernel
+       - DPAA Ethernet Frame Processing
+       - DPAA Ethernet Features
+       - Debugging
+
+DPAA Ethernet Overview
+======================
+
+DPAA stands for Data Path Acceleration Architecture and it is a
+set of networking acceleration IPs that are available on several
+generations of SoCs, both on PowerPC and ARM64.
+
+The Freescale DPAA architecture consists of a series of hardware blocks
+that support Ethernet connectivity. The Ethernet driver depends upon the
+following drivers in the Linux kernel:
+
+ - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
+    drivers/iommu/fsl_*
+ - Frame Manager (FMan)
+    drivers/net/ethernet/freescale/fman
+ - Queue Manager (QMan), Buffer Manager (BMan)
+    drivers/soc/fsl/qbman
+
+A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
+
+  dpaa_eth       /eth0\     ...       /ethN\
+  driver        |      |             |      |
+  -------------   ----   -----------   ----   -------------
+       -Ports  / Tx  Rx \    ...    / Tx  Rx \
+  FMan        |          |         |          |
+       -MACs  |   MAC0   |         |   MACN   |
+             /   dtsec0   \  ...  /   dtsecN   \ (or tgec)
+            /              \     /              \(or memac)
+  ---------  --------------  ---  --------------  ---------
+      FMan, FMan Port, FMan SP, FMan MURAM drivers
+  ---------------------------------------------------------
+      FMan HW blocks: MURAM, MACs, Ports, SP
+  ---------------------------------------------------------
+
+The dpaa_eth relation to the QMan, BMan and FMan:
+              ________________________________
+  dpaa_eth   /            eth0                \
+  driver    /                                  \
+  ---------   -^-   -^-   -^-   ---    ---------
+  QMan driver / \   / \   / \  \   /  | BMan    |
+             |Rx | |Rx | |Tx | |Tx |  | driver  |
+  ---------  |Dfl| |Err| |Cnf| |FQs|  |         |
+  QMan HW    |FQ | |FQ | |FQs| |   |  |         |
+             /   \ /   \ /   \  \ /   |         |
+  ---------   ---   ---   ---   -v-    ---------
+            |        FMan QMI         |         |
+            | FMan HW       FMan BMI  | BMan HW |
+              -----------------------   --------
+
+where the acronyms used above (and in the code) are:
+DPAA = Data Path Acceleration Architecture
+FMan = DPAA Frame Manager
+QMan = DPAA Queue Manager
+BMan = DPAA Buffers Manager
+QMI = QMan interface in FMan
+BMI = BMan interface in FMan
+FMan SP = FMan Storage Profiles
+MURAM = Multi-user RAM in FMan
+FQ = QMan Frame Queue
+Rx Dfl FQ = default reception FQ
+Rx Err FQ = Rx error frames FQ
+Tx Cnf FQ = Tx confirmation FQs
+Tx FQs = transmission frame queues
+dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
+tgec = ten gigabit Ethernet controller (10 Gbps)
+memac = multirate Ethernet MAC (10/100/1000/10000)
+
+DPAA Ethernet Supported SoCs
+============================
+
+The DPAA drivers enable the Ethernet controllers present on the following SoCs:
+
+# PPC
+P1023
+P2041
+P3041
+P4080
+P5020
+P5040
+T1023
+T1024
+T1040
+T1042
+T2080
+T4240
+B4860
+
+# ARM
+LS1043A
+LS1046A
+
+Configuring DPAA Ethernet in your kernel
+========================================
+
+To enable the DPAA Ethernet driver, the following Kconfig options are required:
+
+# common for arch/arm64 and arch/powerpc platforms
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_XGMAC_MDIO=y
+
+# for arch/powerpc only
+CONFIG_FSL_PAMU=y
+
+# common options needed for the PHYs used on the RDBs
+CONFIG_VITESSE_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_AQUANTIA_PHY=y
+
+DPAA Ethernet Frame Processing
+==============================
+
+On Rx, buffers for the incoming frames are retrieved from one of the three
+existing buffers pools. The driver initializes and seeds these, each with
+buffers of different sizes: 1KB, 2KB and 4KB.
+
+On Tx, all transmitted frames are returned to the driver through Tx
+confirmation frame queues. The driver is then responsible for freeing the
+buffers. In order to do this properly, a backpointer is added to the buffer
+before transmission that points to the skb. When the buffer returns to the
+driver on a confirmation FQ, the skb can be correctly consumed.
+
+DPAA Ethernet Features
+======================
+
+Currently the DPAA Ethernet driver enables the basic features required for
+a Linux Ethernet driver. The support for advanced features will be added
+gradually.
+
+The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
+checksum offload feature is enabled by default and cannot be controlled through
+ethtool.
+
+The driver has support for multiple prioritized Tx traffic classes. Priorities
+range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
+strict priority levels. Each traffic class contains NR_CPU TX queues. By
+default, only one traffic class is enabled and the lowest priority Tx queues
+are used. Higher priority traffic classes can be enabled with the mqprio
+qdisc. For example, all four traffic classes are enabled on an interface with
+the following command. Furthermore, skb priority levels are mapped to traffic
+classes as follows:
+
+       * priorities 0 to 3 - traffic class 0 (low priority)
+       * priorities 4 to 7 - traffic class 1 (medium-low priority)
+       * priorities 8 to 11 - traffic class 2 (medium-high priority)
+       * priorities 12 to 15 - traffic class 3 (high priority)
+
+tc qdisc add dev <int> root handle 1: \
+        mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
+
+Debugging
+=========
+
+The following statistics are exported for each interface through ethtool:
+
+       - interrupt count per CPU
+       - Rx packets count per CPU
+       - Tx packets count per CPU
+       - Tx confirmed packets count per CPU
+       - Tx S/G frames count per CPU
+       - Tx error count per CPU
+       - Rx error count per CPU
+       - Rx error count per type
+       - congestion related statistics:
+               - congestion status
+               - time spent in congestion
+               - number of time the device entered congestion
+               - dropped packets count per cause
+
+The driver also exports the following information in sysfs:
+
+       - the FQ IDs for each FQ type
+       /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+
+       - the IDs of the buffer pools in use
+       /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
index bdc4c0db51e1078fb002907124fe7008ef4c0cd4..9c7139d57e5748508ce20400349cfdefd0284f74 100644 (file)
@@ -1,7 +1,7 @@
 TCP protocol
 ============
 
-Last updated: 9 February 2008
+Last updated: 3 June 2017
 
 Contents
 ========
@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
 A congestion control mechanism can be registered through functions in
 tcp_cong.c. The functions used by the congestion control mechanism are
 registered via passing a tcp_congestion_ops struct to
-tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid must be valid.
+tcp_register_congestion_control. As a minimum, the congestion control
+mechanism must provide a valid name and must implement either ssthresh,
+cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
 
 Private data for a congestion control mechanism is stored in tp->ca_priv.
 tcp_ca(tp) returns a pointer to this space.  This is preallocated space - it
 is important to check the size of your private data will fit this space, or
-alternatively space could be allocated elsewhere and a pointer to it could
+alternatively, space could be allocated elsewhere and a pointer to it could
 be stored here.
 
 There are three kinds of congestion control algorithms currently: The
 simplest ones are derived from TCP reno (highspeed, scalable) and just
-provide an alternative the congestion window calculation. More complex
+provide an alternative congestion window calculation. More complex
 ones like BIC try to look at other events to provide better
 heuristics.  There are also round trip time based algorithms like
 Vegas and Westwood+.
@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
 needs to maintain fairness and performance. Please review current
 research and RFC's before developing new modules.
 
-The method that is used to determine which congestion control mechanism is
-determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
-The default congestion control will be the last one registered (LIFO);
-so if you built everything as modules, the default will be reno. If you
-build with the defaults from Kconfig, then CUBIC will be builtin (not a
-module) and it will end up the default.
+The default congestion control mechanism is chosen based on the
+DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
+value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
+module will be autoloaded if needed and you will get the expected protocol. If
+you ask for an unknown congestion method, then the sysctl attempt will fail.
 
-If you really want a particular default value then you will need
-to set it with the sysctl.  If you use a sysctl, the module will be autoloaded
-if needed and you will get the expected protocol. If you ask for an
-unknown congestion method, then the sysctl attempt will fail.
-
-If you remove a tcp congestion control module, then you will get the next
+If you remove a TCP congestion control module, then you will get the next
 available one. Since reno cannot be built as a module, and cannot be
-deleted, it will always be available.
+removed, it will always be available.
 
 How the new TCP output machine [nyi] works.
 ===========================================
index 053c3bdd1fe51c1e48983cfa2e3b4b00349035f2..4d8e525b84eeb876cd61235e86ea76f01c1455e2 100644 (file)
@@ -1172,7 +1172,7 @@ N:        clps711x
 
 ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
 M:     Hartley Sweeten <hsweeten@visionengravers.com>
-M:     Ryan Mallon <rmallon@gmail.com>
+M:     Alexander Sverdlin <alexander.sverdlin@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-ep93xx/
@@ -1489,13 +1489,15 @@ M:      Gregory Clement <gregory.clement@free-electrons.com>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     arch/arm/mach-mvebu/
-F:     drivers/rtc/rtc-armada38x.c
 F:     arch/arm/boot/dts/armada*
 F:     arch/arm/boot/dts/kirkwood*
+F:     arch/arm/configs/mvebu_*_defconfig
+F:     arch/arm/mach-mvebu/
 F:     arch/arm64/boot/dts/marvell/armada*
 F:     drivers/cpufreq/mvebu-cpufreq.c
-F:     arch/arm/configs/mvebu_*_defconfig
+F:     drivers/irqchip/irq-armada-370-xp.c
+F:     drivers/irqchip/irq-mvebu-*
+F:     drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:     Jisheng Zhang <jszhang@marvell.com>
@@ -1721,7 +1723,6 @@ N:        rockchip
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
 M:     Krzysztof Kozlowski <krzk@kernel.org>
-R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 Q:     https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1829,7 +1830,6 @@ F:        drivers/edac/altera_edac.
 ARM/STI ARCHITECTURE
 M:     Patrice Chotard <patrice.chotard@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:     kernel@stlinux.com
 W:     http://www.stlinux.com
 S:     Maintained
 F:     arch/arm/mach-sti/
@@ -7707,7 +7707,7 @@ F:        drivers/platform/x86/hp_accel.c
 
 LIVE PATCHING
 M:     Josh Poimboeuf <jpoimboe@redhat.com>
-M:     Jessica Yu <jeyu@redhat.com>
+M:     Jessica Yu <jeyu@kernel.org>
 M:     Jiri Kosina <jikos@kernel.org>
 M:     Miroslav Benes <mbenes@suse.cz>
 R:     Petr Mladek <pmladek@suse.com>
@@ -8508,7 +8508,7 @@ S:        Odd Fixes
 F:     drivers/media/radio/radio-miropcm20*
 
 MELLANOX MLX4 core VPI driver
-M:     Yishai Hadas <yishaih@mellanox.com>
+M:     Tariq Toukan <tariqt@mellanox.com>
 L:     netdev@vger.kernel.org
 L:     linux-rdma@vger.kernel.org
 W:     http://www.mellanox.com
@@ -8516,7 +8516,6 @@ Q:        http://patchwork.ozlabs.org/project/netdev/list/
 S:     Supported
 F:     drivers/net/ethernet/mellanox/mlx4/
 F:     include/linux/mlx4/
-F:     include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX4 IB driver
 M:     Yishai Hadas <yishaih@mellanox.com>
@@ -8526,6 +8525,7 @@ Q:        http://patchwork.kernel.org/project/linux-rdma/list/
 S:     Supported
 F:     drivers/infiniband/hw/mlx4/
 F:     include/linux/mlx4/
+F:     include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX5 core VPI driver
 M:     Saeed Mahameed <saeedm@mellanox.com>
@@ -8538,7 +8538,6 @@ Q:        http://patchwork.ozlabs.org/project/netdev/list/
 S:     Supported
 F:     drivers/net/ethernet/mellanox/mlx5/core/
 F:     include/linux/mlx5/
-F:     include/uapi/rdma/mlx5-abi.h
 
 MELLANOX MLX5 IB driver
 M:     Matan Barak <matanb@mellanox.com>
@@ -8549,6 +8548,7 @@ Q:        http://patchwork.kernel.org/project/linux-rdma/list/
 S:     Supported
 F:     drivers/infiniband/hw/mlx5/
 F:     include/linux/mlx5/
+F:     include/uapi/rdma/mlx5-abi.h
 
 MELEXIS MLX90614 DRIVER
 M:     Crt Mori <cmo@melexis.com>
@@ -8588,7 +8588,7 @@ S:        Maintained
 F:     drivers/media/dvb-frontends/mn88473*
 
 MODULE SUPPORT
-M:     Jessica Yu <jeyu@redhat.com>
+M:     Jessica Yu <jeyu@kernel.org>
 M:     Rusty Russell <rusty@rustcorp.com.au>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
 S:     Maintained
@@ -10450,7 +10450,7 @@ S:      Orphan
 
 PXA RTC DRIVER
 M:     Robert Jarzmik <robert.jarzmik@free.fr>
-L:     rtc-linux@googlegroups.com
+L:     linux-rtc@vger.kernel.org
 S:     Maintained
 
 QAT DRIVER
@@ -10757,7 +10757,7 @@ X:      kernel/torture.c
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
 M:     Alexandre Belloni <alexandre.belloni@free-electrons.com>
-L:     rtc-linux@googlegroups.com
+L:     linux-rtc@vger.kernel.org
 Q:     http://patchwork.ozlabs.org/project/rtc-linux/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
 S:     Maintained
@@ -11268,7 +11268,6 @@ F:      drivers/media/rc/serial_ir.c
 
 STI CEC DRIVER
 M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
-L:     kernel@stlinux.com
 S:     Maintained
 F:     drivers/staging/media/st-cec/
 F:     Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11778,6 +11777,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
 S:     Supported
 F:     arch/arm/mach-davinci/
 F:     drivers/i2c/busses/i2c-davinci.c
+F:     arch/arm/boot/dts/da850*
 
 TI DAVINCI SERIES MEDIA DRIVER
 M:     "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@ -13861,7 +13861,7 @@ S:      Odd fixes
 F:     drivers/net/wireless/wl3501*
 
 WOLFSON MICROELECTRONICS DRIVERS
-L:     patches@opensource.wolfsonmicro.com
+L:     patches@opensource.cirrus.com
 T:     git https://github.com/CirrusLogic/linux-drivers.git
 W:     https://github.com/CirrusLogic/linux-drivers/wiki
 S:     Supported
index 470bd4d9513ac42eb164cb4513300966a726fa37..853ae9179af93a0ca9751a7faf40372e7fdb15dd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 9d5dc4fda3c16710c0443b6a5043f212230f057d..3f7d1b74c5e02bd46730c58b0a66756c89b904ab 100644 (file)
                @ there.
                .inst   'M' | ('Z' << 8) | (0x1310 << 16)   @ tstne r0, #0x4d000
 #else
-               mov     r0, r0
+               W(mov)  r0, r0
 #endif
                .endm
 
                .macro  __EFI_HEADER
 #ifdef CONFIG_EFI_STUB
-               b       __efi_start
-
                .set    start_offset, __efi_start - start
                .org    start + 0x3c
                @
index 7c711ba614173d91d8c2fd6ff4ccb13980bb3109..8a756870c238435af684215c653f54a739f4f1a5 100644 (file)
@@ -130,19 +130,22 @@ start:
                .rept   7
                __nop
                .endr
-   ARM(                mov     r0, r0          )
-   ARM(                b       1f              )
- THUMB(                badr    r12, 1f         )
- THUMB(                bx      r12             )
+#ifndef CONFIG_THUMB2_KERNEL
+               mov     r0, r0
+#else
+ AR_CLASS(     sub     pc, pc, #3      )       @ A/R: switch to Thumb2 mode
+  M_CLASS(     nop.w                   )       @ M: already in Thumb2 mode
+               .thumb
+#endif
+               W(b)    1f
 
                .word   _magic_sig      @ Magic numbers to help the loader
                .word   _magic_start    @ absolute load/run zImage address
                .word   _magic_end      @ zImage end address
                .word   0x04030201      @ endianness flag
 
- THUMB(                .thumb                  )
-1:             __EFI_HEADER
-
+               __EFI_HEADER
+1:
  ARM_BE8(      setend  be              )       @ go BE8 if compiled for BE8
  AR_CLASS(     mrs     r9, cpsr        )
 #ifdef CONFIG_ARM_VIRT_EXT
index 561f27d8d92224fe8f4f8c3224a5441f2d41175a..9444a9a9ba1057e6b594dc8e2595ac1e5ec593fb 100644 (file)
@@ -3,6 +3,11 @@
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
 
+/* firmware-provided startup stubs live here, where the secondary CPUs are
+ * spinning.
+ */
+/memreserve/ 0x00000000 0x00001000;
+
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
  * bcm2835.dtsi and bcm2836.dtsi.
index f18e1f1d0ce2c6aad83c08b7ad8ac4cfc41e715e..d2be8aa3370b7840e014964c147a4742d5ca87e8 100644 (file)
 
                ethphy0: ethernet-phy@2 {
                        reg = <2>;
+                       micrel,led-mode = <1>;
+                       clocks = <&clks IMX6UL_CLK_ENET_REF>;
+                       clock-names = "rmii-ref";
                };
 
                ethphy1: ethernet-phy@1 {
                        reg = <1>;
+                       micrel,led-mode = <1>;
+                       clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+                       clock-names = "rmii-ref";
                };
        };
 };
index b6f26824e83a96a88e34d33b99a0f4dace08e306..66f615a74118b9da1fe77088b63be8738c17f6c4 100644 (file)
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
        /* NetCP address range */
        ranges = <0 0x26000000 0x1000000>;
 
-       clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
-       clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
+       clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
+       clock-names = "pa_clk", "ethss_clk", "cpts";
        dma-coherent;
 
        ti,navigator-dmas = <&dma_gbe 0>,
index b58e7ebc091994645dd1adb35c7e7dc843fae7b0..148650406cf701cd7ffc5ac92d9054d606e97bfd 100644 (file)
                        };
                };
 
+               osr: sram@70000000 {
+                       compatible = "mmio-sram";
+                       reg = <0x70000000 0x10000>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       clocks = <&clkosr>;
+               };
+
                dspgpio0: keystone_dsp_gpio@02620240 {
                        compatible = "ti,keystone-dsp-gpio";
                        gpio-controller;
index 33a8eb28374eaa8d3b8aca95d8801227bedd87ca..06e2331f666d45fb2a2432ac1ef5401c3c50e37d 100644 (file)
@@ -1,4 +1,4 @@
-#include <versatile-ab.dts>
+#include "versatile-ab.dts"
 
 / {
        model = "ARM Versatile PB";
index cf062472e07bcb4be470bf35ab029df3438dbc7e..2b913f17d50f5d91f50d3aa30e3a8a26c97847b9 100644 (file)
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
        return ret;
 }
 
-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;
 
 void mcpm_cpu_power_down(void)
 {
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
         * on the CPU.
         */
        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-       phys_reset(__pa_symbol(mcpm_entry_point));
+       phys_reset(__pa_symbol(mcpm_entry_point), false);
 
        /* should never get here */
        BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
        __mcpm_cpu_down(cpu, cluster);
 
        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-       phys_reset(__pa_symbol(mcpm_entry_point));
+       phys_reset(__pa_symbol(mcpm_entry_point), false);
        BUG();
 }
 
index 302240c19a5aa688e7bdab1ece506dfbeaccea4e..a0d726a47c8a272b722b0d5623021058da50e113 100644 (file)
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
 #define pgprot_noncached(prot) (prot)
 #define pgprot_writecombine(prot) (prot)
 #define pgprot_dmacoherent(prot) (prot)
+#define pgprot_device(prot)    (prot)
 
 
 /*
index 841e924143f90e089bae9269acacdff65ab597f9..cbd959b73654c43deb72cbe084bdb99043a22b66 100644 (file)
@@ -1,6 +1,7 @@
 menuconfig ARCH_AT91
        bool "Atmel SoCs"
        depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+       select ARM_CPU_SUSPEND if PM
        select COMMON_CLK_AT91
        select GPIOLIB
        select PINCTRL
index efb80354f3034d856ab259bb1568dab41173d3b3..b5cc05dc2cb27c9e20e5f8290b65fff2efeec8ec 100644 (file)
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
        davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
        if (!davinci_sram_suspend) {
                pr_err("PM: cannot allocate SRAM memory\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto no_sram_mem;
        }
 
        davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
 
        suspend_set_ops(&davinci_pm_ops);
 
+       return 0;
+
+no_sram_mem:
+       iounmap(pm_config.ddrpsc_reg_base);
 no_ddrpsc_mem:
        iounmap(pm_config.ddrpll_reg_base);
 no_ddrpll_mem:
index ac8df5201cd656d70073bc03cd13436435b79c66..b4bc42ece7541154431a5855c4bbe0f984094445 100644 (file)
                        cpm_crypto: crypto@800000 {
                                compatible = "inside-secure,safexcel-eip197";
                                reg = <0x800000 0x200000>;
-                               interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-                               | IRQ_TYPE_LEVEL_HIGH)>,
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
index 7740a75a823084d027ffab1c02d221f3083dea87..6e2058847ddcd59ca9fd0d472bfb94f5331b00cd 100644 (file)
                        cps_crypto: crypto@800000 {
                                compatible = "inside-secure,safexcel-eip197";
                                reg = <0x800000 0x200000>;
-                               interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-                               | IRQ_TYPE_LEVEL_HIGH)>,
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
index 65cdd878cfbd603b323a08006872f5de90e90aee..97c123e09e45bfd80173029de0da0161dd4be0c7 100644 (file)
@@ -68,6 +68,7 @@ CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_ARM64_VA_BITS_48=y
@@ -208,6 +209,8 @@ CONFIG_BRCMFMAC=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PM8941_PWRKEY=y
@@ -263,6 +266,7 @@ CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
@@ -292,6 +296,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m
@@ -300,12 +305,14 @@ CONFIG_RENESAS_WDT=y
 CONFIG_BCM2835_WDT=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_EXYNOS_LPASS=m
 CONFIG_MFD_HI655X_PMIC=y
 CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FAN53555=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI655X=y
@@ -473,8 +480,10 @@ CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
 CONFIG_PWM=y
 CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
@@ -484,6 +493,7 @@ CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_ARM_SCPI_PROTOCOL=y
index 0e99978da3f05013d145132950ad204a92e3e4b0..59cca1d6ec547270adbd56a4e2265b9f9fc34375 100644 (file)
@@ -23,9 +23,9 @@
 #define ACPI_MADT_GICC_LENGTH  \
        (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
 
-#define BAD_MADT_GICC_ENTRY(entry, end)                                                \
-       (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||       \
-        (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end)                                        \
+       (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+       (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
 
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
index 4f0e3ebfea4b4f6496a783bd172abc05e7ec1d4a..c7e3e6387a4910a6377d78e10caf98cc1c20243b 100644 (file)
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
                return NULL;
 
        root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-       if (!root_ops)
+       if (!root_ops) {
+               kfree(ri);
                return NULL;
+       }
 
        ri->cfg = pci_acpi_setup_ecam_mapping(root);
        if (!ri->cfg) {
index a89bddefdacf9194373a201c6c6c4cf8c4ac87c0..139093fab3260debefb4da2fbd33e37778784298 100644 (file)
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
 #define vxtime_lock()          do {} while (0)
 #define vxtime_unlock()                do {} while (0)
 
+/* This attribute is used in include/linux/jiffies.h alongside with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data      __attribute__((__section__(".data")))
+
 #endif
 
index 918d4c73e951d7815fc4063322c9ed8e493afb9f..5351e1f3950d158aaa5ff2590c32734862a79834 100644 (file)
@@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        unsigned long childksp;
-       p->set_child_tid = p->clear_child_tid = NULL;
 
        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
index f8da545854f979c33a7b3116d26d822caa46c494..106859ae27ffba114f9f4b0011151db0f65f98d4 100644 (file)
@@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 
        top_of_kernel_stack = sp;
 
-       p->set_child_tid = p->clear_child_tid = NULL;
-
        /* Locate userspace context on stack... */
        sp -= STACK_FRAME_OVERHEAD;     /* redzone */
        sp -= sizeof(struct pt_regs);
index f7c8f9972f618109209e4892512ed903f6b865f3..964da1891ea9cc6b5dc174131db1c7bbdc2f07f4 100644 (file)
@@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"
 
 menu "Kernel options"
 
-config PPC_DT_CPU_FTRS
-       bool "Device-tree based CPU feature discovery & setup"
-       depends on PPC_BOOK3S_64
-       default n
-       help
-         This enables code to use a new device tree binding for describing CPU
-         compatibility and features. Saying Y here will attempt to use the new
-         binding if the firmware provides it. Currently only the skiboot
-         firmware provides this binding.
-         If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-       bool "cpufeatures pass through unknown features to guest/userspace"
-       depends on PPC_DT_CPU_FTRS
-       default y
-
 config HIGHMEM
        bool "High memory support"
        depends on PPC32
index b4b5e6b671ca4dedc27fc35d59b30d4ad488e1c3..0c4e470571ca0faa74d3e9fa38fa57a384cab4bf 100644 (file)
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE       (sizeof(pte_t) << H_PTE_INDEX_SIZE)
index c2d509584a98070accd6be62519fa992e5bc1f3f..d02ad93bf70892f8d342b9d8890a4e1b8065eed6 100644 (file)
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR                   LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX                  LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG               LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE                        LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1             LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
            CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
            CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-           CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+           CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
index a2123f291ab0c5c8dc13cc9364c3a12848a4bb2c..bb99b651085aaf292e5f98ee23c7cdc53d443cd2 100644 (file)
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
-#define TASK_SIZE_USER64       TASK_SIZE_512TB
+#define TASK_SIZE_USER64               TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64      TASK_SIZE_128TB
 #else
-#define TASK_SIZE_USER64       TASK_SIZE_64TB
+#define TASK_SIZE_USER64               TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64      TASK_SIZE_64TB
 #endif
 
 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
                TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@ void release_thread(struct task_struct *);
  * with 128TB and conditionally enable upto 512TB
  */
 #ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW     ((is_32bit_task()) ? \
-                                TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW     ((is_32bit_task()) ?                    \
+                                TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW     TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP (is_32bit_task() ? \
index 8b3b46b7b0f2795b6195eb95ee649d3dece6dc9a..329771559cbbb16048d67d27450865703a248c90 100644 (file)
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+       int nid;
+
+       nid = numa_cpu_lookup_table[cpu];
+
+       /*
+        * Fall back to node 0 if nid is unset (it should be, except bugs).
+        * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+        */
+       return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
index fcc7588a96d694265899935eae34521eda29690d..4c7656dc4e04f09bed8b9bbc8d8f979876237202 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
        {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-utilization-of-resources-register", feat_enable_purr, 0},
-       {"subcore", feat_enable, CPU_FTR_SUBCORE},
        {"no-execute", feat_enable, 0},
        {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
        {"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
        {"wait-v3", feat_enable, 0},
 };
 
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+       if (!str)
+               return 0;
+
+       if (!strcmp(str, "off"))
+               using_dt_cpu_ftrs = false;
+       else if (!strcmp(str, "known"))
+               enable_unknown = false;
+       else
+               return 1;
+
+       return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
 
 static void __init cpufeatures_setup_start(u32 isa)
 {
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
                }
        }
 
-       if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+       if (!known && enable_unknown) {
                if (!feat_try_enable_unknown(f)) {
                        pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
                                f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
                cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
+static int __init disabled_on_cmdline(void)
+{
+       unsigned long root, chosen;
+       const char *p;
+
+       root = of_get_flat_dt_root();
+       chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+       if (chosen == -FDT_ERR_NOTFOUND)
+               return false;
+
+       p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+       if (!p)
+               return false;
+
+       if (strstr(p, "dt_cpu_ftrs=off"))
+               return true;
+
+       return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
                                        int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
        return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
        return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+       using_dt_cpu_ftrs = false;
+
        /* Setup and verify the FDT, if it fails we just bail */
        if (!early_init_dt_verify(fdt))
                return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
        if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
                return false;
 
+       if (disabled_on_cmdline())
+               return false;
+
        cpufeatures_setup_cpu();
 
        using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+       if (!using_dt_cpu_ftrs)
+               return;
+
        of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
index baae104b16c7ba9f7cdf4a305ab5227ebf002467..2ad725ef4368a3e525681b0ce4a56ef8e960bf48 100644 (file)
@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
 #endif
+       current->thread.load_fp = 0;
        memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
        current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        current->thread.vr_save_area = NULL;
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
+       current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        current->thread.tm_tfhar = 0;
        current->thread.tm_texasr = 0;
        current->thread.tm_tfiar = 0;
+       current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
index 71dcda91755d51a2e5705f29308239e7e9c7e506..857129acf960a1bf93c0c5791a6de1d84585caa3 100644 (file)
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-       init_mm.context.addr_limit = TASK_SIZE_128TB;
+       init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error "context.addr_limit not initialized."
 #endif
index f35ff9dea4fb4607459c10d42a29c47f2984e613..a8c1f99e96072530cb1f2d9ed702dffd78665720 100644 (file)
@@ -661,7 +661,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-       return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+       return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
                                    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +672,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-       if (cpu_to_node(from) == cpu_to_node(to))
+       if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
index c6dca2ae78ef9f1225dd6a13e0997034132315a4..a3edf813d4556c547e5b00155c5f9a0dc411872d 100644 (file)
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
         * mm->context.addr_limit. Default to max task size so that we copy the
         * default values to paca which will help us to handle slb miss early.
         */
-       mm->context.addr_limit = TASK_SIZE_128TB;
+       mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
        /*
         * The old code would re-promote on fork, we don't do that when using
index 018f8e90ac35fd19bf37bdbb99438f970621634a..bb28e1a412576ea15492be658a5bf78ee75950a5 100644 (file)
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
        .name                   = "POWER9",
        .n_counter              = MAX_PMU_COUNTERS,
        .add_fields             = ISA207_ADD_FIELDS,
-       .test_adder             = ISA207_TEST_ADDER,
+       .test_adder             = P9_DD1_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power9_config_bhrb,
        .bhrb_filter_map        = power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
        .name                   = "POWER9",
        .n_counter              = MAX_PMU_COUNTERS,
        .add_fields             = ISA207_ADD_FIELDS,
-       .test_adder             = P9_DD1_TEST_ADDER,
+       .test_adder             = ISA207_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power9_config_bhrb,
        .bhrb_filter_map        = power9_bhrb_filter_map,
index 33244e3d9375eae3ccd1224b7dbac87b3822f92a..4fd64d3f5c4429206b8c838ca99387e6668aee11 100644 (file)
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
 
          In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+       bool "Device-tree based CPU feature discovery & setup"
+       depends on PPC_BOOK3S_64
+       default y
+       help
+         This enables code to use a new device tree binding for describing CPU
+         compatibility and features. Saying Y here will attempt to use the new
+         binding if the firmware provides it. Currently only the skiboot
+         firmware provides this binding.
+         If you're not sure say Y.
+
 config UDBG_RTAS_CONSOLE
        bool "RTAS based debug console"
        depends on PPC_RTAS
index e5a891ae80ee5e6881bd10be148c04519d3dffd0..84b7ac926ce65682e83781ec6ab4ac97b6d0727f 100644 (file)
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
        skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
        if (!dump_skip(cprm, skip))
                goto Eio;
+
+       rc = 0;
 out:
        free_page((unsigned long)buf);
        return rc;
index 0babef11136fc8daba7f2666fed3f3b649cc2bd8..8c6119280c1306afd399d2c86ce88381882ab5df 100644 (file)
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-       if (!cpu_has_feature(CPU_FTR_SUBCORE))
+       unsigned pvr_ver;
+
+       pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+       if (pvr_ver != PVR_POWER8 &&
+           pvr_ver != PVR_POWER8E &&
+           pvr_ver != PVR_POWER8NVL)
                return 0;
 
        /*
index e104c71ea44ab5bf715fa5720c0a1ed47c6221c7..1fb162ba9d1c6aaa730123d07258a3923b3d245d 100644 (file)
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
                lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+               lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
                lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
        }
 
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
                lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+               lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
                lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
        }
 
index ef470b470b04ae85488d1a9ffcbe27519884a4db..6afddae2fb4796dc61de3258f683bcc158b15a2e 100644 (file)
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+       struct u8_gpio_chip *u8_gc =
+               container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
        u8_gc->data = in_8(mm_gc->regs);
 }
index 58243b0d21c006cfea9c47723221a33b7673fae9..b558c9e29de37f3ebeb18593c1a3d234c7d9d060 100644 (file)
@@ -192,9 +192,9 @@ config NR_CPUS
        int "Maximum number of CPUs"
        depends on SMP
        range 2 32 if SPARC32
-       range 2 1024 if SPARC64
+       range 2 4096 if SPARC64
        default 32 if SPARC32
-       default 64 if SPARC64
+       default 4096 if SPARC64
 
 source kernel/Kconfig.hz
 
@@ -295,9 +295,13 @@ config NUMA
        depends on SPARC64 && SMP
 
 config NODES_SHIFT
-       int
-       default "4"
+       int "Maximum NUMA Nodes (as a power of 2)"
+       range 4 5 if SPARC64
+       default "5"
        depends on NEED_MULTIPLE_NODES
+       help
+         Specify the maximum number of NUMA Nodes available on the target
+         system.  Increases memory reserved to accommodate various tables.
 
 # Some NUMA nodes have memory ranges that span
 # other nodes.  Even though a pfn is valid and
index f7de0dbc38af2dd36c9f34df53e6e951f6729825..83b36a5371ffc62e80fa694e077867f4f8b1f49f 100644 (file)
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK            TAG_CONTEXT_BITS
 #define CTX_HW_MASK            (CTX_NR_MASK | CTX_PGSZ_MASK)
 
-#define CTX_FIRST_VERSION      ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION      BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx)       \
         (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)      ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
index 22fede6eba116020cf7049e2f45a57545a6d55cb..2cddcda4f85f7555dced053b1fc82fd991e19943 100644 (file)
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
        unsigned long ctx_valid, flags;
-       int cpu;
+       int cpu = smp_processor_id();
 
+       per_cpu(per_cpu_secondary_mm, cpu) = mm;
        if (unlikely(mm == &init_mm))
                return;
 
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
         * for the first time, we must flush that context out of the
         * local TLB.
         */
-       cpu = smp_processor_id();
        if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                cpumask_set_cpu(cpu, mm_cpumask(mm));
                __flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }
 
 #define deactivate_mm(tsk,mm)  do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-       unsigned long flags;
-       int cpu;
-
-       spin_lock_irqsave(&mm->context.lock, flags);
-       if (!CTX_VALID(mm->context))
-               get_new_mmu_context(mm);
-       cpu = smp_processor_id();
-       if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-               cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-       load_secondary_context(mm);
-       __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-       tsb_context_switch(mm);
-       spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
index 2669370305465d7a4d1c2f9c5a7c9eae2f66474d..522b43db2ed336a5d34dee17b4e8c3b593e6ee5d 100644 (file)
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC      1
 #define PIL_SMP_RECEIVE_SIGNAL 2
 #define PIL_SMP_CAPTURE                3
-#define PIL_SMP_CTX_NEW_VERSION        4
 #define PIL_DEVICE_IRQ         5
 #define PIL_SMP_CALL_FUNC_SNGL 6
 #define PIL_DEFERRED_PCR_WORK  7
index 8174f6cdbbbbd87af5bdbcb923352ddadb4e237e..9dca7a892978a49d234a2b9325228b2c900d1276 100644 (file)
@@ -327,6 +327,7 @@ struct vio_dev {
        int                     compat_len;
 
        u64                     dev_no;
+       u64                     id;
 
        unsigned long           channel_id;
 
index b542cc7c8d94d8fc75319091f91ba5dc25251fd9..f87265afb1759e16b735e95067044f827dff27e7 100644 (file)
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
                pbuf.req.handle = cp->handle;
                pbuf.req.major = 1;
                pbuf.req.minor = 0;
-               strcpy(pbuf.req.svc_id, cp->service_id);
+               strcpy(pbuf.id_buf, cp->service_id);
 
                err = __ds_send(lp, &pbuf, msg_len);
                if (err > 0)
index 4d0248aa0928695597161d93f325a49311b43e2c..99dd133a029f06528f78b2b811d81c97fc7cf9f4 100644 (file)
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
        unsigned long page;
+       void *mondo, *p;
 
-       BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+       BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+       /* Make sure mondo block is 64byte aligned */
+       p = kzalloc(127, GFP_KERNEL);
+       if (!p) {
+               prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+               prom_halt();
+       }
+       mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+       tb->cpu_mondo_block_pa = __pa(mondo);
 
        page = get_zeroed_page(GFP_KERNEL);
        if (!page) {
-               prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+               prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
                prom_halt();
        }
 
-       tb->cpu_mondo_block_pa = __pa(page);
-       tb->cpu_list_pa = __pa(page + 64);
+       tb->cpu_list_pa = __pa(page);
 #endif
 }
 
index c9804551262cc2832ea35b0d1285dc3051319fd4..6ae1e77be0bfde27696595a1f33e939935d83321 100644 (file)
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
 
index b3bc0ac757cc11c0c77e106a447817b89d821cae..fdf31040a7dc5cc460de6d60c9724a60933b38dc 100644 (file)
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        preempt_enable();
 }
 
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-       struct mm_struct *mm;
-       unsigned long flags;
-
-       clear_softint(1 << irq);
-
-       /* See if we need to allocate a new TLB context because
-        * the version of the one we are using is now out of date.
-        */
-       mm = current->active_mm;
-       if (unlikely(!mm || (mm == &init_mm)))
-               return;
-
-       spin_lock_irqsave(&mm->context.lock, flags);
-
-       if (unlikely(!CTX_VALID(mm->context)))
-               get_new_mmu_context(mm);
-
-       spin_unlock_irqrestore(&mm->context.lock, flags);
-
-       load_secondary_context(mm);
-       __flush_tlb_mm(CTX_HWBITS(mm->context),
-                      SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-       smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
index 10689cfd0ad40e6b12ae6b148f99ac2f5c7deb64..07c0df92496034efd1262dd2b40e56ffd5486c0c 100644 (file)
@@ -455,13 +455,16 @@ __tsb_context_switch:
        .type   copy_tsb,#function
 copy_tsb:              /* %o0=old_tsb_base, %o1=old_tsb_size
                         * %o2=new_tsb_base, %o3=new_tsb_size
+                        * %o4=page_size_shift
                         */
        sethi           %uhi(TSB_PASS_BITS), %g7
        srlx            %o3, 4, %o3
-       add             %o0, %o1, %g1   /* end of old tsb */
+       add             %o0, %o1, %o1   /* end of old tsb */
        sllx            %g7, 32, %g7
        sub             %o3, 1, %o3     /* %o3 == new tsb hash mask */
 
+       mov             %o4, %g1        /* page_size_shift */
+
 661:   prefetcha       [%o0] ASI_N, #one_read
        .section        .tsb_phys_patch, "ax"
        .word           661b
@@ -486,9 +489,9 @@ copy_tsb:           /* %o0=old_tsb_base, %o1=old_tsb_size
        /* This can definitely be computed faster... */
        srlx            %o0, 4, %o5     /* Build index */
        and             %o5, 511, %o5   /* Mask index */
-       sllx            %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+       sllx            %o5, %g1, %o5   /* Put into vaddr position */
        or              %o4, %o5, %o4   /* Full VADDR. */
-       srlx            %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+       srlx            %o4, %g1, %o4   /* Shift down to create index */
        and             %o4, %o3, %o4   /* Mask with new_tsb_nents-1 */
        sllx            %o4, 4, %o4     /* Shift back up into tsb ent offset */
        TSB_STORE(%o2 + %o4, %g2)       /* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb:           /* %o0=old_tsb_base, %o1=old_tsb_size
        TSB_STORE(%o2 + %o4, %g3)       /* Store TTE */
 
 80:    add             %o0, 16, %o0
-       cmp             %o0, %g1
+       cmp             %o0, %o1
        bne,pt          %xcc, 90b
         nop
 
index 7bd8f6556352d91cdc30e18d590e7421ffba6465..efe93ab4a9c0654143d52569c7157f9117e7ae37 100644 (file)
@@ -50,7 +50,7 @@ tl0_resv03e:  BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
 tl0_irq1:      TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2:      TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3:      TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4:      TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4:       BTRAP(0x44)
 #else
 tl0_irq1:      BTRAP(0x41)
 tl0_irq2:      BTRAP(0x42)
index f6bb857254fcfa170155d4cd8dc8cb717c5bfb97..075d38980dee394fdb32f86e130d0b3ec37ebfeb 100644 (file)
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
        if (!id) {
                dev_set_name(&vdev->dev, "%s", bus_id_name);
                vdev->dev_no = ~(u64)0;
+               vdev->id = ~(u64)0;
        } else if (!cfg_handle) {
                dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
                vdev->dev_no = *id;
+               vdev->id = ~(u64)0;
        } else {
                dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
                             *cfg_handle, *id);
                vdev->dev_no = *cfg_handle;
+               vdev->id = *id;
        }
 
        vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
        (void) vio_create_one(hp, node, &root_vdev->dev);
 }
 
+struct vio_md_node_query {
+       const char *type;
+       u64 dev_no;
+       u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+       struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
        struct vio_dev *vdev = to_vio_dev(dev);
 
-       if (vdev->mp == (u64) arg)
-               return 1;
+       if (vdev->dev_no != query->dev_no)
+               return 0;
+       if (vdev->id != query->id)
+               return 0;
+       if (strcmp(vdev->type, query->type))
+               return 0;
 
-       return 0;
+       return 1;
 }
 
 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+       const char *type;
+       const u64 *id, *cfg_handle;
+       u64 a;
+       struct vio_md_node_query query;
        struct device *dev;
 
-       dev = device_find_child(&root_vdev->dev, (void *) node,
+       type = mdesc_get_property(hp, node, "device-type", NULL);
+       if (!type) {
+               type = mdesc_get_property(hp, node, "name", NULL);
+               if (!type)
+                       type = mdesc_node_name(hp, node);
+       }
+
+       query.type = type;
+
+       id = mdesc_get_property(hp, node, "id", NULL);
+       cfg_handle = NULL;
+       mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+               u64 target;
+
+               target = mdesc_arc_target(hp, a);
+               cfg_handle = mdesc_get_property(hp, target,
+                                               "cfg-handle", NULL);
+               if (cfg_handle)
+                       break;
+       }
+
+       if (!id) {
+               query.dev_no = ~(u64)0;
+               query.id = ~(u64)0;
+       } else if (!cfg_handle) {
+               query.dev_no = *id;
+               query.id = ~(u64)0;
+       } else {
+               query.dev_no = *cfg_handle;
+               query.id = *id;
+       }
+
+       dev = device_find_child(&root_vdev->dev, &query,
                                vio_md_node_match);
        if (dev) {
                printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
 
                device_unregister(dev);
                put_device(dev);
+       } else {
+               if (!id)
+                       printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+                              type);
+               else if (!cfg_handle)
+                       printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+                              type, *id);
+               else
+                       printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+                              type, *cfg_handle, *id);
        }
 }
 
index 69912d2f8b54e903ef040b346371cc27204b9d15..07c03e72d81248cebe9c3d48bbc93e1e2de455b7 100644 (file)
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644 (file)
index 0000000..d6b6c97
--- /dev/null
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+       .text
+       .align  4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+       mov     %o1, %g1
+       srl     %o3, 0, %g4
+       mulx    %g4, %g1, %o1
+       srlx    %g1, 0x20, %g3
+       mulx    %g3, %g4, %g5
+       sllx    %g5, 0x20, %o5
+       srl     %g1, 0, %g4
+       sub     %o1, %o5, %o5
+       srlx    %o5, 0x20, %o5
+       addcc   %g5, %o5, %g5
+       srlx    %o3, 0x20, %o5
+       mulx    %g4, %o5, %g4
+       mulx    %g3, %o5, %o5
+       sethi   %hi(0x80000000), %g3
+       addcc   %g5, %g4, %g5
+       srlx    %g5, 0x20, %g5
+       add     %g3, %g3, %g3
+       movcc   %xcc, %g0, %g3
+       addcc   %o5, %g5, %o5
+       sllx    %g4, 0x20, %g4
+       add     %o1, %g4, %o1
+       add     %o5, %g3, %g2
+       mulx    %g1, %o2, %g1
+       add     %g1, %g2, %g1
+       mulx    %o0, %o3, %o0
+       retl
+        add    %g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
index 0cda653ae007645fa01f05b4c40518332159a6ac..3c40ebd50f928cbbbfe69c65c35810a78b30c53d 100644 (file)
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
        }
 
        if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
-               pr_warn("hugepagesz=%llu not supported by MMU.\n",
+               hugetlb_bad_size();
+               pr_err("hugepagesz=%llu not supported by MMU.\n",
                        hugepage_size);
                goto out;
        }
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR     (1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+       unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+       unsigned long new_ver, new_ctx, old_ctx;
+       struct mm_struct *mm;
+       int cpu;
+
+       bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+       /* Reserve kernel context */
+       set_bit(0, mmu_context_bmap);
+
+       new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+       if (unlikely(new_ver == 0))
+               new_ver = CTX_FIRST_VERSION;
+       tlb_context_cache = new_ver;
+
+       /*
+        * Make sure that any new mm that are added into per_cpu_secondary_mm,
+        * are going to go through get_new_mmu_context() path.
+        */
+       mb();
+
+       /*
+        * Updated versions to current on those CPUs that had valid secondary
+        * contexts
+        */
+       for_each_online_cpu(cpu) {
+               /*
+                * If a new mm is stored after we took this mm from the array,
+                * it will go into get_new_mmu_context() path, because we
+                * already bumped the version in tlb_context_cache.
+                */
+               mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+               if (unlikely(!mm || mm == &init_mm))
+                       continue;
+
+               old_ctx = mm->context.sparc64_ctx_val;
+               if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+                       new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+                       set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+                       mm->context.sparc64_ctx_val = new_ctx;
+               }
+       }
+}
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
        unsigned long ctx, new_ctx;
        unsigned long orig_pgsz_bits;
-       int new_version;
 
        spin_lock(&ctx_alloc_lock);
+retry:
+       /* wrap might have happened, test again if our context became valid */
+       if (unlikely(CTX_VALID(mm->context)))
+               goto out;
        orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
        ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-       new_version = 0;
        if (new_ctx >= (1 << CTX_NR_BITS)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
-                       int i;
-                       new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-                               CTX_FIRST_VERSION;
-                       if (new_ctx == 1)
-                               new_ctx = CTX_FIRST_VERSION;
-
-                       /* Don't call memset, for 16 entries that's just
-                        * plain silly...
-                        */
-                       mmu_context_bmap[0] = 3;
-                       mmu_context_bmap[1] = 0;
-                       mmu_context_bmap[2] = 0;
-                       mmu_context_bmap[3] = 0;
-                       for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-                               mmu_context_bmap[i + 0] = 0;
-                               mmu_context_bmap[i + 1] = 0;
-                               mmu_context_bmap[i + 2] = 0;
-                               mmu_context_bmap[i + 3] = 0;
-                       }
-                       new_version = 1;
-                       goto out;
+                       mmu_context_wrap();
+                       goto retry;
                }
        }
+       if (mm->context.sparc64_ctx_val)
+               cpumask_clear(mm_cpumask(mm));
        mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
        tlb_context_cache = new_ctx;
        mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
        spin_unlock(&ctx_alloc_lock);
-
-       if (unlikely(new_version))
-               smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
index bedf08b22a4773c5a104b56f7de8b44c461630c1..0d4b998c7d7b74a9e930f12d735591f14769befd 100644 (file)
@@ -496,7 +496,8 @@ retry_tsb_alloc:
                extern void copy_tsb(unsigned long old_tsb_base,
                                     unsigned long old_tsb_size,
                                     unsigned long new_tsb_base,
-                                    unsigned long new_tsb_size);
+                                    unsigned long new_tsb_size,
+                                    unsigned long page_size_shift);
                unsigned long old_tsb_base = (unsigned long) old_tsb;
                unsigned long new_tsb_base = (unsigned long) new_tsb;
 
@@ -504,7 +505,9 @@ retry_tsb_alloc:
                        old_tsb_base = __pa(old_tsb_base);
                        new_tsb_base = __pa(new_tsb_base);
                }
-               copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+               copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+                       tsb_index == MM_TSB_BASE ?
+                       PAGE_SHIFT : REAL_HPAGE_SHIFT);
        }
 
        mm->context.tsb_block[tsb_index].tsb = new_tsb;
index 5d2fd6cd31896b87a3373a59cbfc3130808c6908..fcf4d27a38fb47af30d026079022d07bb803e323 100644 (file)
@@ -971,11 +971,6 @@ xcall_capture:
        wr              %g0, (1 << PIL_SMP_CAPTURE), %set_softint
        retry
 
-       .globl          xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-       wr              %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-       retry
-
 #ifdef CONFIG_KGDB
        .globl          xcall_kgdb_capture
 xcall_kgdb_capture:
index 45db4d2ebd0118e666c205185b9162d13e33316e..e9f4d762aa5b5cabde501f95045fad6c43fef54b 100644 (file)
@@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
 
 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {
@@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
        if (!desc.mc)
                return -EINVAL;
 
-       ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
-                                desc.data, desc.size);
+       ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
        if (ret != UCODE_OK)
                return -EINVAL;
 
@@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
        enum ucode_state ret;
 
@@ -689,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
 
 #ifdef CONFIG_X86_32
        /* save BSP's matching patch for early load */
-       if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-               struct ucode_patch *p = find_patch(cpu);
+       if (save) {
+               struct ucode_patch *p = find_patch(0);
                if (p) {
                        memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
                        memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 {
        char fw_name[36] = "amd-ucode/microcode_amd.bin";
        struct cpuinfo_x86 *c = &cpu_data(cpu);
+       bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
        enum ucode_state ret = UCODE_NFOUND;
        const struct firmware *fw;
 
        /* reload ucode container only on the boot cpu */
-       if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
+       if (!refresh_fw || !bsp)
                return UCODE_OK;
 
        if (c->x86 >= 0x15)
@@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
                goto fw_release;
        }
 
-       ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
+       ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
 
  fw_release:
        release_firmware(fw);
index ff40e74c9181f0e009b51909a0e76ce25c1c2cf3..ffeae818aa7a95ffd0395ae9af5fcedd9b599981 100644 (file)
@@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
        printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
        printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
-               smp_processor_id());
+               raw_smp_processor_id());
 
        printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->ax, regs->bx, regs->cx, regs->dx);
index c329d28949056e2d6ad4688e411c5644b81e4d68..d24c8742d9b0aa6df35d5e479e627ff008ea221f 100644 (file)
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+       preempt_disable();
        kvm_x86_ops->cancel_hv_timer(apic->vcpu);
        apic->lapic_timer.hv_timer_in_use = false;
+       preempt_enable();
 }
 
 static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        for (i = 0; i < KVM_APIC_LVT_NUM; i++)
                kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_update_lvtt(apic);
-       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+       if (kvm_vcpu_is_reset_bsp(vcpu) &&
+           kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
                kvm_lapic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
        apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
index 183ddb235fb48658028433d451db75d554152c54..ba9891ac5c568f1798555bfa9dcbc421fff5ae2a 100644 (file)
@@ -1807,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
         * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross vendor migration purposes by "not present"
         */
-       var->unusable = !var->present || (var->type == 0);
+       var->unusable = !var->present;
 
        switch (seg) {
        case VCPU_SREG_TR:
@@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                 */
                if (var->unusable)
                        var->db = 0;
+               /* This is symmetric with svm_set_segment() */
                var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
@@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
-       if (var->unusable)
-               s->attrib = 0;
-       else {
-               s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
-               s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
-               s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
-               s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
-               s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
-               s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
-               s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
-               s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
-       }
+       s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+       s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+       s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+       s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+       s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+       s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+       s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+       s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 
        /*
         * This is always accurate, except if SYSRET returned to a segment
@@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
         * would entail passing the CPL to userspace and back.
         */
        if (seg == VCPU_SREG_SS)
-               svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+               /* This is symmetric with svm_get_segment() */
+               svm->vmcb->save.cpl = (var->dpl & 3);
 
        mark_dirty(svm->vmcb, VMCB_SEG);
 }
index 72f78396bc0960968161b66ccee00c42fa203fb7..9b4b5d6dcd34755acc0c09525ca93b3408ee4128 100644 (file)
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-                                 gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
        gva_t gva;
-       gpa_t vmptr;
        struct x86_exception e;
-       struct page *page;
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
        if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
                        vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
                return 1;
 
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-                               sizeof(vmptr), &e)) {
+       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+                               sizeof(*vmpointer), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
 
-       switch (exit_reason) {
-       case EXIT_REASON_VMON:
-               /*
-                * SDM 3: 24.11.5
-                * The first 4 bytes of VMXON region contain the supported
-                * VMCS revision identifier
-                *
-                * Note - IA32_VMX_BASIC[48] will never be 1
-                * for the nested case;
-                * which replaces physical address width with 32
-                *
-                */
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               page = nested_get_page(vcpu, vmptr);
-               if (page == NULL) {
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-                       kunmap(page);
-                       nested_release_page_clean(page);
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               kunmap(page);
-               nested_release_page_clean(page);
-               vmx->nested.vmxon_ptr = vmptr;
-               break;
-       case EXIT_REASON_VMCLEAR:
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMCLEAR_INVALID_ADDRESS);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               if (vmptr == vmx->nested.vmxon_ptr) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMCLEAR_VMXON_POINTER);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               break;
-       case EXIT_REASON_VMPTRLD:
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMPTRLD_INVALID_ADDRESS);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               if (vmptr == vmx->nested.vmxon_ptr) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMPTRLD_VMXON_POINTER);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               break;
-       default:
-               return 1; /* shouldn't happen */
-       }
-
-       if (vmpointer)
-               *vmpointer = vmptr;
        return 0;
 }
 
@@ -7066,6 +6990,8 @@ out_msr_bitmap:
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
        int ret;
+       gpa_t vmptr;
+       struct page *page;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
+
+       /*
+        * SDM 3: 24.11.5
+        * The first 4 bytes of VMXON region contain the supported
+        * VMCS revision identifier
+        *
+        * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
+        * which would replace the physical address width with 32
+        */
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       page = nested_get_page(vcpu, vmptr);
+       if (page == NULL) {
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+       if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+               kunmap(page);
+               nested_release_page_clean(page);
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+       kunmap(page);
+       nested_release_page_clean(page);
+
+       vmx->nested.vmxon_ptr = vmptr;
        ret = enter_vmx_operation(vcpu);
        if (ret)
                return ret;
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
 
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       if (vmptr == vmx->nested.vmxon_ptr) {
+               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
        if (vmptr == vmx->nested.current_vmptr)
                nested_release_vmcs12(vmx);
 
@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
 
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       if (vmptr == vmx->nested.vmxon_ptr) {
+               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
        if (vmx->nested.current_vmptr != vmptr) {
                struct vmcs12 *new_vmcs12;
                struct page *page;
@@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 {
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        int cr = exit_qualification & 15;
-       int reg = (exit_qualification >> 8) & 15;
-       unsigned long val = kvm_register_readl(vcpu, reg);
+       int reg;
+       unsigned long val;
 
        switch ((exit_qualification >> 4) & 3) {
        case 0: /* mov to cr */
+               reg = (exit_qualification >> 8) & 15;
+               val = kvm_register_readl(vcpu, reg);
                switch (cr) {
                case 0:
                        if (vmcs12->cr0_guest_host_mask &
@@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
                 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
                 * cr0. Other attempted changes are ignored, with no exit.
                 */
+               val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
                if (vmcs12->cr0_guest_host_mask & 0xe &
                    (val ^ vmcs12->cr0_read_shadow))
                        return true;
index 02363e37d4a61e8271d7fed0a8c534e9dd90f264..a2cd0997343c485051e849551b9fc9d904177fe0 100644 (file)
@@ -8394,10 +8394,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
        if (vcpu->arch.pv.pv_unhalted)
                return true;
 
-       if (atomic_read(&vcpu->arch.nmi_queued))
+       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+           (vcpu->arch.nmi_pending &&
+            kvm_x86_ops->nmi_allowed(vcpu)))
                return true;
 
-       if (kvm_test_request(KVM_REQ_SMI, vcpu))
+       if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+           (vcpu->arch.smi_pending && !is_smm(vcpu)))
                return true;
 
        if (kvm_arch_interrupt_allowed(vcpu) &&
index 83a59a67757a77b46f7b7788074294a3a3c8a10c..9b78685b66e663c80ec3a68986a39024fd372b3c 100644 (file)
@@ -65,11 +65,9 @@ static int __init nopat(char *str)
 }
 early_param("nopat", nopat);
 
-static bool __read_mostly __pat_initialized = false;
-
 bool pat_enabled(void)
 {
-       return __pat_initialized;
+       return !!__pat_enabled;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -227,14 +225,13 @@ static void pat_bsp_init(u64 pat)
        }
 
        wrmsrl(MSR_IA32_CR_PAT, pat);
-       __pat_initialized = true;
 
        __init_cache_modes(pat);
 }
 
 static void pat_ap_init(u64 pat)
 {
-       if (!this_cpu_has(X86_FEATURE_PAT)) {
+       if (!boot_cpu_has(X86_FEATURE_PAT)) {
                /*
                 * If this happens we are on a secondary CPU, but switched to
                 * PAT on the boot CPU. We have no way to undo PAT.
@@ -309,7 +306,7 @@ void pat_init(void)
        u64 pat;
        struct cpuinfo_x86 *c = &boot_cpu_data;
 
-       if (!__pat_enabled) {
+       if (!pat_enabled()) {
                init_cache_modes();
                return;
        }
index 7e76a4d8304bc5add30e5f86d16e4f5b423a24f6..43b96f5f78ba8c9c323c5ae6090c19f3ae290ad0 100644 (file)
@@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void)
 
        /*
         * We don't do virtual mode, since we don't do runtime services, on
-        * non-native EFI
+        * non-native EFI. With efi=old_map, we don't do runtime services in
+        * kexec kernel because in the initial boot something else might
+        * have been mapped at these virtual addresses.
         */
-       if (!efi_is_native()) {
+       if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
                efi_memmap_unmap();
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
index c488625c9712de4fe150d01df5c260e650967265..eb8dff15a7f63721d0f07845263da1af75670771 100644 (file)
@@ -71,11 +71,13 @@ static void __init early_code_mapping_set_exec(int executable)
 
 pgd_t * __init efi_call_phys_prolog(void)
 {
-       unsigned long vaddress;
-       pgd_t *save_pgd;
+       unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
+       pgd_t *save_pgd, *pgd_k, *pgd_efi;
+       p4d_t *p4d, *p4d_k, *p4d_efi;
+       pud_t *pud;
 
        int pgd;
-       int n_pgds;
+       int n_pgds, i, j;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                save_pgd = (pgd_t *)read_cr3();
@@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void)
        n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
        save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
 
+       /*
+        * Build 1:1 identity mapping for efi=old_map usage. Note that
+        * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
+        * it is PUD_SIZE aligned with KASLR enabled. So for a given physical
+        * address X, pud_index(X) != pud_index(__va(X)); we can only copy the
+        * PUD entry of __va(X) to fill in the PUD entry of X to build the 1:1 mapping.
+        * This means here we can only reuse the PMD tables of the direct mapping.
+        */
        for (pgd = 0; pgd < n_pgds; pgd++) {
-               save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
-               vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
-               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+               addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
+               vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
+               pgd_efi = pgd_offset_k(addr_pgd);
+               save_pgd[pgd] = *pgd_efi;
+
+               p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
+               if (!p4d) {
+                       pr_err("Failed to allocate p4d table!\n");
+                       goto out;
+               }
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       addr_p4d = addr_pgd + i * P4D_SIZE;
+                       p4d_efi = p4d + p4d_index(addr_p4d);
+
+                       pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
+                       if (!pud) {
+                               pr_err("Failed to allocate pud table!\n");
+                               goto out;
+                       }
+
+                       for (j = 0; j < PTRS_PER_PUD; j++) {
+                               addr_pud = addr_p4d + j * PUD_SIZE;
+
+                               if (addr_pud > (max_pfn << PAGE_SHIFT))
+                                       break;
+
+                               vaddr = (unsigned long)__va(addr_pud);
+
+                               pgd_k = pgd_offset_k(vaddr);
+                               p4d_k = p4d_offset(pgd_k, vaddr);
+                               pud[j] = *pud_offset(p4d_k, vaddr);
+                       }
+               }
        }
 out:
        __flush_tlb_all();
@@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
        /*
         * After the lock is released, the original page table is restored.
         */
-       int pgd_idx;
+       int pgd_idx, i;
        int nr_pgds;
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                write_cr3((unsigned long)save_pgd);
@@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
 
        nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
 
-       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
+               pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
 
+               if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+                       continue;
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       p4d = p4d_offset(pgd,
+                                        pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+                       if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+                               continue;
+
+                       pud = (pud_t *)p4d_page_vaddr(*p4d);
+                       pud_free(&init_mm, pud);
+               }
+
+               p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+               p4d_free(&init_mm, p4d);
+       }
+
        kfree(save_pgd);
 
        __flush_tlb_all();
index 26615991d69cc8b024470c921aca227c2756e439..e0cf95a83f3fab918eb73715d188e631e02a1425 100644 (file)
@@ -360,6 +360,9 @@ void __init efi_free_boot_services(void)
                free_bootmem_late(start, size);
        }
 
+       if (!num_entries)
+               return;
+
        new_size = efi.memmap.desc_size * num_entries;
        new_phys = efi_memmap_alloc(num_entries);
        if (!new_phys) {
index c8a32fb345cf5db7bac8d1a7b6a6c5e2b1d1a0fe..78b2e0db4fb2c0adba7f0b7ec089cf61f0f311c8 100644 (file)
@@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling)
 BFQG_FLAG_FNS(empty)
 #undef BFQG_FLAG_FNS
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 {
        unsigned long long now;
@@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
        bfqg_stats_clear_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
 {
@@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
        bfqg_stats_mark_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
 {
        unsigned long long now;
@@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-       return blkg_get(bfqg_to_blkg(bfqg));
+       bfqg->ref++;
 }
 
 void bfqg_put(struct bfq_group *bfqg)
 {
-       return blkg_put(bfqg_to_blkg(bfqg));
+       bfqg->ref--;
+
+       if (bfqg->ref == 0)
+               kfree(bfqg);
+}
+
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+{
+       /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+       bfqg_get(bfqg);
+
+       blkg_get(bfqg_to_blkg(bfqg));
+}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg)
+{
+       bfqg_put(bfqg);
+
+       blkg_put(bfqg_to_blkg(bfqg));
 }
 
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
@@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
-               bfqg_get(bfqg);
+               /*
+                * Make sure that bfqg and its associated blkg do not
+                * disappear before entity.
+                */
+               bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
@@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
                return NULL;
        }
 
+       /* see comments in bfq_bic_update_cgroup for why refcounting */
+       bfqg_get(bfqg);
        return &bfqg->pd;
 }
 
@@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
        struct bfq_group *bfqg = pd_to_bfqg(pd);
 
        bfqg_stats_exit(&bfqg->stats);
-       return kfree(bfqg);
+       bfqg_put(bfqg);
 }
 
 void bfq_pd_reset_stats(struct blkg_policy_data *pd)
@@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
  * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
  * it on the new one.  Avoid putting the entity on the old group idle tree.
  *
- * Must be called under the queue lock; the cgroup owning @bfqg must
- * not disappear (by now this just means that we are called under
- * rcu_read_lock()).
+ * Must be called under the scheduler lock, to make sure that the blkg
+ * owning @bfqg does not disappear (see comments in
+ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
+ * objects).
  */
 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
@@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-       bfqg_put(bfqq_group(bfqq));
+       bfqg_and_blkg_put(bfqq_group(bfqq));
 
-       /*
-        * Here we use a reference to bfqg.  We don't need a refcounter
-        * as the cgroup reference will not be dropped, so that its
-        * destroy() callback will not be invoked.
-        */
        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
-       bfqg_get(bfqg);
+       /* pin down bfqg and its associated blkg  */
+       bfqg_and_blkg_get(bfqg);
 
        if (bfq_bfqq_busy(bfqq)) {
                bfq_pos_tree_add_move(bfqd, bfqq);
@@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  * @bic: the bic to move.
  * @blkcg: the blk-cgroup to move to.
  *
- * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
- * has to make sure that the reference to cgroup is valid across the call.
+ * Move bic to blkcg, assuming that bfqd->lock is held; which makes
+ * sure that the reference to cgroup is valid across the call (see
+ * comments in bfq_bic_update_cgroup on this issue)
  *
  * NOTE: an alternative approach might have been to store the current
  * cgroup in bfqq and getting a reference to it, reducing the lookup
@@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
                goto out;
 
        bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+       /*
+        * Update blkg_path for bfq_log_* functions. We cache this
+        * path, and update it here, for the following
+        * reasons. Operations on blkg objects in blk-cgroup are
+        * protected with the request_queue lock, and not with the
+        * lock that protects the instances of this scheduler
+        * (bfqd->lock). This exposes BFQ to the following sort of
+        * race.
+        *
+        * The blkg_lookup performed in bfq_get_queue, protected
+        * through rcu, may happen to return the address of a copy of
+        * the original blkg. If this is the case, then the
+        * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
+        * the blkg, is useless: it does not prevent blk-cgroup code
+        * from destroying both the original blkg and all objects
+        * directly or indirectly referred by the copy of the
+        * blkg.
+        *
+        * On the bright side, destroy operations on a blkg invoke, as
+        * a first step, hooks of the scheduler associated with the
+        * blkg. And these hooks are executed with bfqd->lock held for
+        * BFQ. As a consequence, for any blkg associated with the
+        * request queue this instance of the scheduler is attached
+        * to, we are guaranteed that such a blkg is not destroyed, and
+        * that all the pointers it contains are consistent, while we
+        * are holding bfqd->lock. A blkg_lookup performed with
+        * bfqd->lock held then returns a fully consistent blkg, which
+        * remains consistent until this lock is held.
+        *
+        * Thanks to the last fact, and to the fact that: (1) bfqg has
+        * been obtained through a blkg_lookup in the above
+        * assignment, and (2) bfqd->lock is being held, here we can
+        * safely use the policy data for the involved blkg (i.e., the
+        * field bfqg->pd) to get to the blkg associated with bfqg,
+        * and then we can safely use any field of blkg. After we
+        * release bfqd->lock, even just getting blkg through this
+        * bfqg may cause dangling references to be traversed, as
+        * bfqg->pd may not exist any more.
+        *
+        * In view of the above facts, here we cache, in the bfqg, any
+        * blkg data we may need for this bic, and for its associated
+        * bfq_queue. As of now, we need to cache only the path of the
+        * blkg, which is used in the bfq_log_* functions.
+        *
+        * Finally, note that bfqg itself needs to be protected from
+        * destruction on the blkg_free of the original blkg (which
+        * invokes bfq_pd_free). We use an additional private
+        * refcounter for bfqg, to let it disappear only after no
+        * bfq_queue refers to it any longer.
+        */
+       blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
        bic->blkcg_serial_nr = serial_nr;
 out:
        rcu_read_unlock();
@@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
  * @bfqd: the device data structure with the root group.
  * @bfqg: the group to move from.
  * @st: the service tree with the entities.
- *
- * Needs queue_lock to be taken and reference to be valid over the call.
  */
 static void bfq_reparent_active_entities(struct bfq_data *bfqd,
                                         struct bfq_group *bfqg,
@@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd)
                /*
                 * The idle tree may still contain bfq_queues belonging
                 * to exited task because they never migrated to a different
-                * cgroup from the one being destroyed now.  No one else
-                * can access them so it's safe to act without any lock.
+                * cgroup from the one being destroyed now.
                 */
                bfq_flush_idle_tree(st);
 
index 08ce45096350561896fb6c8959c5c04603e98555..ed93da2462abbc94ab75c10a0d9c7ce251f3f0fb 100644 (file)
@@ -3665,7 +3665,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 
        kmem_cache_free(bfq_pool, bfqq);
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-       bfqg_put(bfqg);
+       bfqg_and_blkg_put(bfqg);
 #endif
 }
 
index ae783c06dfd9ca73c9a3832e41e5617c510bdaf5..5c3bf986149215b3d98f753548cba9b4880f6e62 100644 (file)
@@ -759,6 +759,12 @@ struct bfq_group {
        /* must be the first member */
        struct blkg_policy_data pd;
 
+       /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
+       char blkg_path[128];
+
+       /* reference counter (see comments in bfq_bic_update_cgroup) */
+       int ref;
+
        struct bfq_entity entity;
        struct bfq_sched_data sched_data;
 
@@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_put(struct bfq_group *bfqg);
+void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 extern struct cftype bfq_blkcg_legacy_files[];
@@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do {                    \
-       char __pbuf[128];                                               \
-                                                                       \
-       blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
-       blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
+       blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
                        bfq_bfqq_sync((bfqq)) ? 'S' : 'A',              \
-                         __pbuf, ##args);                              \
+                       bfqq_group(bfqq)->blkg_path, ##args);           \
 } while (0)
 
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {                    \
-       char __pbuf[128];                                               \
-                                                                       \
-       blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf));          \
-       blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args);    \
-} while (0)
+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \
+       blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
 
 #else /* CONFIG_BFQ_GROUP_IOSCHED */
 
index 5384713d48bc9929e2a4dc8b1b9f22b2e1c5bcd3..b5009a896a7faa1dfc9fe4320181798cc42ccfa5 100644 (file)
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
        if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
                return false;
 
+       if (!bio_sectors(bio))
+               return false;
+
        /* Already protected? */
        if (bio_integrity(bio))
                return false;
index 7c2947128f5813a677a0361eddcd277b5946d03e..0480892e97e501807a7f14f843eb549719f33c81 100644 (file)
@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
        if (blkg->blkcg != &blkcg_root)
-               blk_exit_rl(&blkg->rl);
+               blk_exit_rl(blkg->q, &blkg->rl);
 
        blkg_rwstat_exit(&blkg->stat_ios);
        blkg_rwstat_exit(&blkg->stat_bytes);
index c7068520794bd0ba060b905f850efaae6a8cbd36..a7421b772d0e0e3f4b8372fbc11aefd83763d30a 100644 (file)
@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
        if (!rl->rq_pool)
                return -ENOMEM;
 
+       if (rl != &q->root_rl)
+               WARN_ON_ONCE(!blk_get_queue(q));
+
        return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-       if (rl->rq_pool)
+       if (rl->rq_pool) {
                mempool_destroy(rl->rq_pool);
+               if (rl != &q->root_rl)
+                       blk_put_queue(q);
+       }
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
index f2224ffd225da8acb9b4775a19125f015cc6ab0a..bb66c96850b18cb419b0e44aab1894169352f9af 100644 (file)
@@ -1461,22 +1461,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
        return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
-                                     bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                       struct request *rq,
+                                       blk_qc_t *cookie, bool may_sleep)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_queue_data bd = {
                .rq = rq,
                .last = true,
        };
-       struct blk_mq_hw_ctx *hctx;
        blk_qc_t new_cookie;
        int ret;
+       bool run_queue = true;
+
+       if (blk_mq_hctx_stopped(hctx)) {
+               run_queue = false;
+               goto insert;
+       }
 
        if (q->elevator)
                goto insert;
 
-       if (!blk_mq_get_driver_tag(rq, &hctx, false))
+       if (!blk_mq_get_driver_tag(rq, NULL, false))
                goto insert;
 
        new_cookie = request_to_qc_t(hctx, rq);
@@ -1500,7 +1506,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 
        __blk_mq_requeue_request(rq);
 insert:
-       blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+       blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1508,7 +1514,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                rcu_read_lock();
-               __blk_mq_try_issue_directly(rq, cookie, false);
+               __blk_mq_try_issue_directly(hctx, rq, cookie, false);
                rcu_read_unlock();
        } else {
                unsigned int srcu_idx;
@@ -1516,7 +1522,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                might_sleep();
 
                srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
-               __blk_mq_try_issue_directly(rq, cookie, true);
+               __blk_mq_try_issue_directly(hctx, rq, cookie, true);
                srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
        }
 }
@@ -1619,9 +1625,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                blk_mq_put_ctx(data.ctx);
 
-               if (same_queue_rq)
+               if (same_queue_rq) {
+                       data.hctx = blk_mq_map_queue(q,
+                                       same_queue_rq->mq_ctx->cpu);
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                        &cookie);
+               }
        } else if (q->nr_hw_queues > 1 && is_sync) {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
@@ -2641,7 +2650,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+                                                       int nr_hw_queues)
 {
        struct request_queue *q;
 
@@ -2665,6 +2675,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+       mutex_lock(&set->tag_list_lock);
+       __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+       mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
index 712b018e9f5496de893440646e7942e2c752b3b8..283da7fbe03408d9eef71ba3e1a4f863671d761b 100644 (file)
@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_free_queue_stats(q->stats);
 
-       blk_exit_rl(&q->root_rl);
+       blk_exit_rl(q, &q->root_rl);
 
        if (q->queue_tags)
                __blk_queue_free_tags(q);
index fc13dd0c6e3956a84913d9e71132c0f321a67280..a7285bf2831c7bdbb89b753fccb198f8640e8780 100644 (file)
@@ -27,6 +27,13 @@ static int throtl_quantum = 32;
 #define MIN_THROTL_IOPS (10)
 #define DFL_LATENCY_TARGET (-1L)
 #define DFL_IDLE_THRESHOLD (0)
+#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
+#define LATENCY_FILTERED_SSD (0)
+/*
+ * For HD, very small latencies come from sequential IO, which is of no help
+ * in determining whether the IO is impacted by other IO, hence we ignore it
+ */
+#define LATENCY_FILTERED_HD (1000L) /* 1ms */
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
 
@@ -212,6 +219,7 @@ struct throtl_data
        struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
        struct latency_bucket __percpu *latency_buckets;
        unsigned long last_calculate_time;
+       unsigned long filtered_latency;
 
        bool track_bio_latency;
 };
@@ -698,7 +706,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
                                          unsigned long expires)
 {
-       unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;
+       unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 
        /*
         * Since we are adjusting the throttle limit dynamically, the sleep
@@ -2281,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
                throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
                        bio_op(bio), lat);
 
-       if (tg->latency_target) {
+       if (tg->latency_target && lat >= tg->td->filtered_latency) {
                int bucket;
                unsigned int threshold;
 
@@ -2417,14 +2425,20 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
        struct throtl_data *td;
+       int i;
 
        td = q->td;
        BUG_ON(!td);
 
-       if (blk_queue_nonrot(q))
+       if (blk_queue_nonrot(q)) {
                td->throtl_slice = DFL_THROTL_SLICE_SSD;
-       else
+               td->filtered_latency = LATENCY_FILTERED_SSD;
+       } else {
                td->throtl_slice = DFL_THROTL_SLICE_HD;
+               td->filtered_latency = LATENCY_FILTERED_HD;
+               for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+                       td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+       }
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
        /* if no low limit, use previous default */
        td->throtl_slice = DFL_THROTL_SLICE_HD;
index 2ed70228e44fc706e6efee71ca000e5e47433217..83c8e1100525f7dd80b9a75e83cd2f8efb0f5969 100644 (file)
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);
index da69b079725fbf62a407db76f7c5c430c52be3f9..b7e9c7feeab2acbd1a846d0c31285460aba076ec 100644 (file)
@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY         (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
        cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+       if (!iops_mode(cfqd))
+               return CFQ_SLICE_MODE_GROUP_DELAY;
+       else
+               return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
        n = rb_last(&st->rb);
        if (n) {
                __cfqg = rb_entry_cfqg(n);
-               cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+               cfqg->vdisktime = __cfqg->vdisktime +
+                       cfq_get_cfqg_vdisktime_delay(cfqd);
        } else
                cfqg->vdisktime = st->min_vdisktime;
        cfq_group_service_tree_add(st, cfqg);
index d3a989e718f53518bafe93ddb9efea419e5d9b30..3cd6e12cfc467d27ccf2830fffde82e1aaf0f45e 100644 (file)
@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
         * signature and returns that to us.
         */
        ret = crypto_akcipher_verify(req);
-       if (ret == -EINPROGRESS) {
+       if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
                wait_for_completion(&compl.completion);
                ret = compl.err;
        }
index fa749f47013508d562366fb0484e7ea80535ba0a..cdb27ac4b2266eccff2ba89a5388ec2fbf6d18bc 100644 (file)
@@ -1767,9 +1767,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
                        break;
                case -EINPROGRESS:
                case -EBUSY:
-                       ret = wait_for_completion_interruptible(
-                               &drbg->ctr_completion);
-                       if (!ret && !drbg->ctr_async_err) {
+                       wait_for_completion(&drbg->ctr_completion);
+                       if (!drbg->ctr_async_err) {
                                reinit_completion(&drbg->ctr_completion);
                                break;
                        }
index b7ad808be3d4ec6c3822ce2cc5c0428d8f3b3dd0..3841b5eafa7ee244f605c28fd56c5a8c5dcaba9b 100644 (file)
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 
        err = crypto_skcipher_encrypt(&data->req);
        if (err == -EINPROGRESS || err == -EBUSY) {
-               err = wait_for_completion_interruptible(
-                       &data->result.completion);
-               if (!err)
-                       err = data->result.err;
+               wait_for_completion(&data->result.completion);
+               err = data->result.err;
        }
 
        if (err)
index 5a968a78652bd23d269fbc871957a4488fb03af6..7abe6650573950674ce41a7511fd9abe2736c451 100644 (file)
@@ -418,11 +418,7 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
 
        table_desc->validation_count++;
        if (table_desc->validation_count == 0) {
-               ACPI_ERROR((AE_INFO,
-                           "Table %p, Validation count is zero after increment\n",
-                           table_desc));
                table_desc->validation_count--;
-               return_ACPI_STATUS(AE_LIMIT);
        }
 
        *out_table = table_desc->pointer;
index a9a9ab3399d47ff8087e62d495f1d2ba930fc1d3..d42eeef9d9287815ce5f4c82d8d915ae5deabe51 100644 (file)
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
        if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
            (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
             (battery->capacity_now <= battery->alarm)))
-               pm_wakeup_hard_event(&battery->device->dev);
+               pm_wakeup_event(&battery->device->dev, 0);
 
        return result;
 }
index 25aba9b107dd51db522225b5171e7017a5a3ec9d..e19f530f1083a13732328516925e3bbeb6493e14 100644 (file)
@@ -113,7 +113,7 @@ struct acpi_button {
 
 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
 static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
 
 static unsigned long lid_report_interval __read_mostly = 500;
 module_param(lid_report_interval, ulong, 0644);
@@ -217,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
        }
 
        if (state)
-               pm_wakeup_hard_event(&device->dev);
+               pm_wakeup_event(&device->dev, 0);
 
        ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
        if (ret == NOTIFY_DONE)
@@ -402,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
                } else {
                        int keycode;
 
-                       pm_wakeup_hard_event(&device->dev);
+                       pm_wakeup_event(&device->dev, 0);
                        if (button->suspended)
                                break;
 
@@ -534,7 +534,6 @@ static int acpi_button_add(struct acpi_device *device)
                lid_device = device;
        }
 
-       device_init_wakeup(&device->dev, true);
        printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
        return 0;
 
index 798d5003a039d876f275fc2d933be71cb7ebfbed..993fd31394c854c99e5ce0c2af824f36c50b7a22 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/pm_qos.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
-#include <linux/suspend.h>
 
 #include "internal.h"
 
@@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
        mutex_lock(&acpi_pm_notifier_lock);
 
        if (adev->wakeup.flags.notifier_present) {
-               pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
+               __pm_wakeup_event(adev->wakeup.ws, 0);
                if (adev->wakeup.context.work.func)
                        queue_pm_work(&adev->wakeup.context.work);
        }
index a6574d62634031ac6e351418b935a333b556b665..097d630ab8867267326121f9f4db2525cf06ef4b 100644 (file)
@@ -663,40 +663,14 @@ static int acpi_freeze_prepare(void)
        acpi_os_wait_events_complete();
        if (acpi_sci_irq_valid())
                enable_irq_wake(acpi_sci_irq);
-
        return 0;
 }
 
-static void acpi_freeze_wake(void)
-{
-       /*
-        * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
-        * that the SCI has triggered while suspended, so cancel the wakeup in
-        * case it has not been a wakeup event (the GPEs will be checked later).
-        */
-       if (acpi_sci_irq_valid() &&
-           !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
-               pm_system_cancel_wakeup();
-}
-
-static void acpi_freeze_sync(void)
-{
-       /*
-        * Process all pending events in case there are any wakeup ones.
-        *
-        * The EC driver uses the system workqueue, so that one needs to be
-        * flushed too.
-        */
-       acpi_os_wait_events_complete();
-       flush_scheduled_work();
-}
-
 static void acpi_freeze_restore(void)
 {
        acpi_disable_wakeup_devices(ACPI_STATE_S0);
        if (acpi_sci_irq_valid())
                disable_irq_wake(acpi_sci_irq);
-
        acpi_enable_all_runtime_gpes();
 }
 
@@ -708,8 +682,6 @@ static void acpi_freeze_end(void)
 static const struct platform_freeze_ops acpi_freeze_ops = {
        .begin = acpi_freeze_begin,
        .prepare = acpi_freeze_prepare,
-       .wake = acpi_freeze_wake,
-       .sync = acpi_freeze_sync,
        .restore = acpi_freeze_restore,
        .end = acpi_freeze_end,
 };
index 1b5ee1e0e5a3073457b0b15da34aadf292a544ba..e414fabf73158d77fba356be2c10354647e81a44 100644 (file)
@@ -333,14 +333,17 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
            container_of(bin_attr, struct acpi_table_attr, attr);
        struct acpi_table_header *table_header = NULL;
        acpi_status status;
+       ssize_t rc;
 
        status = acpi_get_table(table_attr->name, table_attr->instance,
                                &table_header);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
-       return memory_read_from_buffer(buf, count, &offset,
-                                      table_header, table_header->length);
+       rc = memory_read_from_buffer(buf, count, &offset, table_header,
+                       table_header->length);
+       acpi_put_table(table_header);
+       return rc;
 }
 
 static int acpi_table_attr_init(struct kobject *tables_obj,
index 2fc52407306c15c27b9fb0b11c2db4ef4641aeba..c69954023c2e7d8c235aace4b66a1d32298f36eb 100644 (file)
@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 {}
 #endif
 
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+                                   struct pci_dev *pdev)
+{
+       static const struct dmi_system_id sysids[] = {
+               {
+                       .ident = "Acer Switch Alpha 12",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+                       },
+               },
+               { }
+       };
+
+       if (dmi_check_system(sysids)) {
+               dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
+               if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+                       hpriv->port_map = 0x7;
+                       hpriv->cap = 0xC734FF02;
+               }
+       }
+}
+
 #ifdef CONFIG_ARM64
 /*
  * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                         "online status unreliable, applying workaround\n");
        }
 
+
+       /* Acer SA5-271 workaround modifies private_data */
+       acer_sa5_271_workaround(hpriv, pdev);
+
        /* CAP.NP sometimes indicate the index of the last enabled
         * port, at other times, that of the last possible port, so
         * determining the maximum port number requires looking at
index aaa761b9081cc02a75792302c741f7b54ebd9823..cd2eab6aa92ea245e1a3dab839be7fe8aa938cdb 100644 (file)
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
 
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
-               dev_err(dev, "no irq\n");
-               return -EINVAL;
+               if (irq != -EPROBE_DEFER)
+                       dev_err(dev, "no irq\n");
+               return irq;
        }
 
        hpriv->irq = irq;
index 2d83b8c7596567a020300d8dd1aeefeb33a6a055..e157a0e4441916b77b53c402741b74e124012cc9 100644 (file)
@@ -6800,7 +6800,7 @@ static int __init ata_parse_force_one(char **cur,
        }
 
        force_ent->port = simple_strtoul(id, &endp, 10);
-       if (p == endp || *endp != '\0') {
+       if (id == endp || *endp != '\0') {
                *reason = "invalid port/link";
                return -EINVAL;
        }
index b66bcda88320fefa399ac9653eca64d3045a6a96..3b2246dded74fbeed89d53f913c725ab6e5c0082 100644 (file)
@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev)
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        struct resource *res;
-       void __iomem *mmio;
        int n_ports = 0, irq = 0;
        int rc;
        int port;
@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev)
         * Get the register base first
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       mmio = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(mmio))
-               return PTR_ERR(mmio);
+       if (res == NULL)
+               return -EINVAL;
 
        /* allocate host */
        if (pdev->dev.of_node) {
@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev)
        hpriv->board_idx = chip_soc;
 
        host->iomap = NULL;
-       hpriv->base = mmio - SATAHC0_REG_BASE;
+       hpriv->base = devm_ioremap(&pdev->dev, res->start,
+                                  resource_size(res));
+       if (!hpriv->base)
+               return -ENOMEM;
+
+       hpriv->base -= SATAHC0_REG_BASE;
 
        hpriv->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(hpriv->clk))
index 5d38245a7a73a7cc4fa0d49255a52c7daead7886..b7939a2c1fab53ff2a94799a261d401c1c440f2e 100644 (file)
@@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to get access to sata clock\n");
                return PTR_ERR(priv->clk);
        }
-       clk_prepare_enable(priv->clk);
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        host = ata_host_alloc(&pdev->dev, 1);
        if (!host) {
@@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev)
        struct ata_host *host = dev_get_drvdata(dev);
        struct sata_rcar_priv *priv = host->private_data;
        void __iomem *base = priv->base;
+       int ret;
 
-       clk_prepare_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        /* ack and mask */
        iowrite32(0, base + SATAINTSTAT_REG);
@@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
        struct sata_rcar_priv *priv = host->private_data;
+       int ret;
 
-       clk_prepare_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        sata_rcar_setup_port(host);
 
index e987a6f55d36747f79b470b0372a1abcff6390d7..9faee1c893e53c8dea6e14d472a73a8b7131bf96 100644 (file)
@@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
        if (async_error)
                goto Complete;
 
+       if (pm_wakeup_pending()) {
+               async_error = -EBUSY;
+               goto Complete;
+       }
+
        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;
 
index 9c36b27996fc2b56a141bb388acf4947a45b104b..c313b600d356260fd9b98fe4848340f9cbdcf9ae 100644 (file)
@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
 /* First wakeup IRQ seen by the kernel in the last cycle. */
 unsigned int pm_wakeup_irq __read_mostly;
 
-/* If greater than 0 and the system is suspending, terminate the suspend. */
-static atomic_t pm_abort_suspend __read_mostly;
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
 
 /*
  * Combined counters of registered wakeup events and wakeup events in progress.
@@ -855,26 +855,20 @@ bool pm_wakeup_pending(void)
                pm_print_active_wakeup_sources();
        }
 
-       return ret || atomic_read(&pm_abort_suspend) > 0;
+       return ret || pm_abort_suspend;
 }
 
 void pm_system_wakeup(void)
 {
-       atomic_inc(&pm_abort_suspend);
+       pm_abort_suspend = true;
        freeze_wake();
 }
 EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
-void pm_system_cancel_wakeup(void)
-{
-       atomic_dec(&pm_abort_suspend);
-}
-
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(void)
 {
+       pm_abort_suspend = false;
        pm_wakeup_irq = 0;
-       if (reset)
-               atomic_set(&pm_abort_suspend, 0);
 }
 
 void pm_system_irq_wakeup(unsigned int irq_number)
index 28d932906f24c19e7250e6c69ec903b4a4460761..ebbd0c3fe0ed997973271ba9b57020d2519843d0 100644 (file)
@@ -608,6 +608,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
  */
 static int loop_flush(struct loop_device *lo)
 {
+       /* loop not yet configured, no running thread, nothing to flush */
+       if (lo->lo_state != Lo_bound)
+               return 0;
        return loop_switch(lo, NULL);
 }
 
index 9a7bb2c2944772cad8124a965bacc17d0aa8f935..f3f191ba8ca4bbe6b7d87a7accc84bd648e4d718 100644 (file)
@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
        return -ENOSPC;
 }
 
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-       nbd->config = NULL;
-       nbd->tag_set.timeout = 0;
-       queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
        if (bdev->bd_openers > 1)
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
                        }
                        kfree(config->socks);
                }
-               nbd_reset(nbd);
+               kfree(nbd->config);
+               nbd->config = NULL;
+
+               nbd->tag_set.timeout = 0;
+               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
                mutex_unlock(&nbd->config_lock);
                nbd_put(nbd);
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
        disk->fops = &nbd_fops;
        disk->private_data = nbd;
        sprintf(disk->disk_name, "nbd%d", index);
-       nbd_reset(nbd);
        add_disk(disk);
        nbd_total_devices++;
        return index;
index 454bf9c34882f33d673ccbaf0c8afa4f3ee18ad4..c16f74547804ccb957275f6d59b705b0ba35eb6b 100644 (file)
@@ -4023,6 +4023,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 
        switch (req_op(rq)) {
        case REQ_OP_DISCARD:
+       case REQ_OP_WRITE_ZEROES:
                op_type = OBJ_OP_DISCARD;
                break;
        case REQ_OP_WRITE:
@@ -4420,6 +4421,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        q->limits.discard_granularity = segment_size;
        q->limits.discard_alignment = segment_size;
        blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
+       blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
 
        if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
                q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
index d4dbd8d8e524d7b712f9668cbee57c7d722440b2..382c864814d944c79e610eaa434bc356d12bd335 100644 (file)
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 
        rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
        if (rc <= 0) {
-               DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+               DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
                DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                if (rc == -ERESTARTSYS)
                        return rc;
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
        for (i = 0; i < bytes_to_write; i++) {
                rc = wait_for_bulk_out_ready(dev);
                if (rc <= 0) {
-                       DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+                       DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
                               rc);
                        DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                        if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
        rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
 
        if (rc <= 0) {
-               DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+               DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
                DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                if (rc == -ERESTARTSYS)
                        return rc;
index 0ab0249189072befe3cee1b8696052727f360540..a561f0c2f428df6cbd80e0fe5bfd3c479a578e18 100644 (file)
@@ -1097,12 +1097,16 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
        __u32 *ptr = (__u32 *) regs;
+       unsigned long flags;
 
        if (regs == NULL)
                return 0;
+       local_irq_save(flags);
        if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
                f->reg_idx = 0;
-       return *(ptr + f->reg_idx++);
+       ptr += f->reg_idx++;
+       local_irq_restore(flags);
+       return *ptr;
 }
 
 void add_interrupt_randomness(int irq, int irq_flags)
index 0e3f6496524d92c7c1717d8d2259684952d7acb8..26b643d57847de0fca4afc099f4bec49d6326be9 100644 (file)
@@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
            list_empty(&cpufreq_policy_list)) {
                /* if all ->init() calls failed, unregister */
+               ret = -ENODEV;
                pr_debug("%s: No CPU initialized for driver %s\n", __func__,
                         driver_data->name);
                goto err_if_unreg;
index b7de5bd76a31743f52cc9095845495e596d0817d..eb1158532de31e7aee418162135a7495b10f9860 100644 (file)
@@ -571,9 +571,10 @@ static inline void update_turbo_state(void)
 static int min_perf_pct_min(void)
 {
        struct cpudata *cpu = all_cpu_data[0];
+       int turbo_pstate = cpu->pstate.turbo_pstate;
 
-       return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
-                           cpu->pstate.turbo_pstate);
+       return turbo_pstate ?
+               DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
 }
 
 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
index 1b9bcd76c60e334e72a31a3b6ff1a21410ac23ff..c2dd43f3f5d8a3092e6847f18d124f0631bdf065 100644 (file)
@@ -127,7 +127,12 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                return PTR_ERR(priv.cpu_clk);
        }
 
-       clk_prepare_enable(priv.cpu_clk);
+       err = clk_prepare_enable(priv.cpu_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare cpuclk\n");
+               return err;
+       }
+
        kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
 
        priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
@@ -137,7 +142,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                goto out_cpu;
        }
 
-       clk_prepare_enable(priv.ddr_clk);
+       err = clk_prepare_enable(priv.ddr_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare ddrclk\n");
+               goto out_cpu;
+       }
        kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
 
        priv.powersave_clk = of_clk_get_by_name(np, "powersave");
@@ -146,7 +155,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                err = PTR_ERR(priv.powersave_clk);
                goto out_ddr;
        }
-       clk_prepare_enable(priv.powersave_clk);
+       err = clk_prepare_enable(priv.powersave_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare powersave clk\n");
+               goto out_ddr;
+       }
 
        of_node_put(np);
        np = NULL;
index d37e8dda807900fe9725aa153c20c1c2bc927a52..ec240592f5c8e7a450e2c26c20feece9d12dafad 100644 (file)
@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
        struct dma_device       dma_dev;
        bool                    m2m;
        int                     (*hw_setup)(struct ep93xx_dma_chan *);
+       void                    (*hw_synchronize)(struct ep93xx_dma_chan *);
        void                    (*hw_shutdown)(struct ep93xx_dma_chan *);
        void                    (*hw_submit)(struct ep93xx_dma_chan *);
        int                     (*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
                | M2P_CONTROL_ENABLE;
        m2p_set_control(edmac, control);
 
+       edmac->buffer = 0;
+
        return 0;
 }
 
@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
        return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }
 
-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+       unsigned long flags;
        u32 control;
 
+       spin_lock_irqsave(&edmac->lock, flags);
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);
+       spin_unlock_irqrestore(&edmac->lock, flags);
 
        while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-               cpu_relax();
+               schedule();
+}
 
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
        m2p_set_control(edmac, 0);
 
-       while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-               cpu_relax();
+       while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+               dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1160,6 +1169,26 @@ fail:
        return NULL;
 }
 
+/**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+       if (edmac->edma->hw_synchronize)
+               edmac->edma->hw_synchronize(edmac);
+}
+
 /**
  * ep93xx_dma_terminate_all - terminate all transactions
  * @chan: channel
@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
        dma_dev->device_config = ep93xx_dma_slave_config;
+       dma_dev->device_synchronize = ep93xx_dma_synchronize;
        dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
        dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
        dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        } else {
                dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
 
+               edma->hw_synchronize = m2p_hw_synchronize;
                edma->hw_setup = m2p_hw_setup;
                edma->hw_shutdown = m2p_hw_shutdown;
                edma->hw_submit = m2p_hw_submit;
index a28a01fcba674dc569e4d49ca6fd50def5a58645..f3e211f8f6c58c00080703f11b25937bb36dab39 100644 (file)
@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
        struct mv_xor_v2_sw_desc *sw_desq;
        int desc_size;
        unsigned int npendings;
+       unsigned int hw_queue_idx;
 };
 
 /**
@@ -213,18 +214,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
        }
 }
 
-/*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-       /* read the index for the next available descriptor in the DESQ */
-       u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-       return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-               & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
 /*
  * notify the engine of new descriptors, and update the available index.
  */
@@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
        return MV_XOR_V2_EXT_DESC_SIZE;
 }
 
-/*
- * Set the IMSG threshold
- */
-static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
-{
-       u32 reg;
-
-       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-
-       reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-       reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-
-       writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-}
-
 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 {
        struct mv_xor_v2_device *xor_dev = data;
@@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
        if (!ndescs)
                return IRQ_NONE;
 
-       /*
-        * Update IMSG threshold, to disable new IMSG interrupts until
-        * end of the tasklet
-        */
-       mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
        /* schedule a tasklet to handle descriptors callbacks */
        tasklet_schedule(&xor_dev->irq_tasklet);
 
@@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-       int desq_ptr;
        void *dest_hw_desc;
        dma_cookie_t cookie;
        struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
        spin_lock_bh(&xor_dev->lock);
        cookie = dma_cookie_assign(tx);
 
-       /* get the next available slot in the DESQ */
-       desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
        /* copy the HW descriptor from the SW descriptor to the DESQ */
-       dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+       dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
 
        memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
 
        xor_dev->npendings++;
+       xor_dev->hw_queue_idx++;
+       if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+               xor_dev->hw_queue_idx = 0;
 
        spin_unlock_bh(&xor_dev->lock);
 
@@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc     *
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
        struct mv_xor_v2_sw_desc *sw_desc;
+       bool found = false;
 
        /* Lock the channel */
        spin_lock_bh(&xor_dev->lock);
@@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
                return NULL;
        }
 
-       /* get a free SW descriptor from the SW DESQ */
-       sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-                                  struct mv_xor_v2_sw_desc, free_list);
+       list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+               if (async_tx_test_ack(&sw_desc->async_tx)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               spin_unlock_bh(&xor_dev->lock);
+               return NULL;
+       }
+
        list_del(&sw_desc->free_list);
 
        /* Release the channel */
        spin_unlock_bh(&xor_dev->lock);
 
-       /* set the async tx descriptor */
-       dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-       sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-       async_tx_ack(&sw_desc->async_tx);
-
        return sw_desc;
 }
 
@@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                __func__, len, &src, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                __func__, src_cnt, len, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
                container_of(chan, struct mv_xor_v2_device, dmachan);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;
@@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
 {
        struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
        int pending_ptr, num_of_pending, i;
-       struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
        struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
 
        dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
        /* get the pending descriptors parameters */
        num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
 
-       /* next HW descriptor */
-       next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
        /* loop over free descriptors */
        for (i = 0; i < num_of_pending; i++) {
-
-               if (pending_ptr > MV_XOR_V2_DESC_NUM)
-                       pending_ptr = 0;
-
-               if (next_pending_sw_desc != NULL)
-                       next_pending_hw_desc++;
+               struct mv_xor_v2_descriptor *next_pending_hw_desc =
+                       xor_dev->hw_desq_virt + pending_ptr;
 
                /* get the SW descriptor related to the HW descriptor */
                next_pending_sw_desc =
@@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
 
                /* increment the next descriptor */
                pending_ptr++;
+               if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+                       pending_ptr = 0;
        }
 
        if (num_of_pending != 0) {
                /* free the descriptores */
                mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
        }
-
-       /* Update IMSG threshold, to enable new IMSG interrupts */
-       mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }
 
 /*
@@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
 
-       /* enable the DMA engine */
-       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
        /*
         * This is a temporary solution, until we activate the
         * SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
 
+       /* enable the DMA engine */
+       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
        return 0;
 }
 
@@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, xor_dev);
 
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+       if (ret)
+               return ret;
+
        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
@@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        /* add all SW descriptors to the free list */
        for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-               xor_dev->sw_desq[i].idx = i;
-               list_add(&xor_dev->sw_desq[i].free_list,
+               struct mv_xor_v2_sw_desc *sw_desc =
+                       xor_dev->sw_desq + i;
+               sw_desc->idx = i;
+               dma_async_tx_descriptor_init(&sw_desc->async_tx,
+                                            &xor_dev->dmachan);
+               sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+               async_tx_ack(&sw_desc->async_tx);
+
+               list_add(&sw_desc->free_list,
                         &xor_dev->free_sw_desc);
        }
 
index 8b0da7fa520d27ac514228130c354f121a12b848..e90a7a0d760af6d031fa465208e88f2dd6f056b4 100644 (file)
@@ -3008,7 +3008,8 @@ static int pl330_remove(struct amba_device *adev)
 
        for (i = 0; i < AMBA_NR_IRQS; i++) {
                irq = adev->irq[i];
-               devm_free_irq(&adev->dev, irq, pl330);
+               if (irq)
+                       devm_free_irq(&adev->dev, irq, pl330);
        }
 
        dma_async_device_unregister(&pl330->ddma);
index db41795fe42ae6ed355de41f12b5c90ea661bde4..bd261c9e9664b6ac951939641091bc0bb7466380 100644 (file)
@@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        if (desc->hwdescs.use) {
                dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
                        RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+               if (dptr == 0)
+                       dptr = desc->nchunks;
+               dptr--;
                WARN_ON(dptr >= desc->nchunks);
        } else {
                running = desc->running;
index 72c649713aceecd75a20957522792702fca1696a..31a145154e9f26a8562e51223cffa50e65352075 100644 (file)
@@ -117,7 +117,7 @@ struct usb_dmac {
 #define USB_DMASWR                     0x0008
 #define USB_DMASWR_SWR                 (1 << 0)
 #define USB_DMAOR                      0x0060
-#define USB_DMAOR_AE                   (1 << 2)
+#define USB_DMAOR_AE                   (1 << 1)
 #define USB_DMAOR_DME                  (1 << 0)
 
 #define USB_DMASAR                     0x0000
index 44c01390d0353fd3170fc797eb4ce6393229bd14..dc269cb288c209d60e780eff287af2930fb4c477 100644 (file)
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0400, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(board_version,       0444, DMI_BOARD_VERSION);
@@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_version,   DMI_PRODUCT_VERSION);
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
+       ADD_DMI_ATTR(product_family,      DMI_PRODUCT_FAMILY);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
index 54be60ead08f8068c18dc9bbfd5a40e3cb26685c..93f7acdaac7ac19c057fc6b98a76c270a4646f24 100644 (file)
@@ -430,6 +430,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
                dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
                dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
                dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+               dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
                break;
        case 2:         /* Base Board Information */
                dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
index 04ca8764f0c096f4e3f006ab74e4dc55996735a1..8bf27323f7a37c34591c45f8b39d2091ae096260 100644 (file)
@@ -36,6 +36,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
        if (acpi_disabled)
                return;
 
+       if (!efi_enabled(EFI_BOOT))
+               return;
+
        if (table->length < sizeof(bgrt_tab)) {
                pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
                       table->length, sizeof(bgrt_tab));
index 8c34d50a4d8032bbaba3322b3dee4ff22826a923..959777ec8a77bab62e49097cd93d711ccd311610 100644 (file)
 
 /* BIOS variables */
 static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
-static const efi_char16_t const efi_SecureBoot_name[] = {
+static const efi_char16_t efi_SecureBoot_name[] = {
        'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0
 };
-static const efi_char16_t const efi_SetupMode_name[] = {
+static const efi_char16_t efi_SetupMode_name[] = {
        'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0
 };
 
index a4831fe0223bffebdda4727589453aa6cb8fb7b1..a2c59a08b2bd69919b23989feddff71a88855617 100644 (file)
@@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
 }
 
 const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
-       amdgpu_vram_mgr_init,
-       amdgpu_vram_mgr_fini,
-       amdgpu_vram_mgr_new,
-       amdgpu_vram_mgr_del,
-       amdgpu_vram_mgr_debug
+       .init           = amdgpu_vram_mgr_init,
+       .takedown       = amdgpu_vram_mgr_fini,
+       .get_node       = amdgpu_vram_mgr_new,
+       .put_node       = amdgpu_vram_mgr_del,
+       .debug          = amdgpu_vram_mgr_debug
 };
index fb08193599092d0d6562e0c5b4c019b6be45bed2..90332f55cfba91b7a543da4ec3820809bb876336 100644 (file)
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
+       u32 v;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(mmVCE_RB_RPTR);
+               v = RREG32(mmVCE_RB_RPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(mmVCE_RB_RPTR2);
+               v = RREG32(mmVCE_RB_RPTR2);
        else
-               return RREG32(mmVCE_RB_RPTR3);
+               v = RREG32(mmVCE_RB_RPTR3);
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return v;
 }
 
 /**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
+       u32 v;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(mmVCE_RB_WPTR);
+               v = RREG32(mmVCE_RB_WPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(mmVCE_RB_WPTR2);
+               v = RREG32(mmVCE_RB_WPTR2);
        else
-               return RREG32(mmVCE_RB_WPTR3);
+               v = RREG32(mmVCE_RB_WPTR3);
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return v;
 }
 
 /**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        else if (ring == &adev->vce.ring[1])
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        struct amdgpu_ring *ring;
        int idx, r;
 
-       ring = &adev->vce.ring[0];
-       WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
-       ring = &adev->vce.ring[1];
-       WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
-
-       ring = &adev->vce.ring[2];
-       WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
-
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
                WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+               /* Program instance 0 reg space for two instances or instance 0 case
+               program instance 1 reg space for only instance 1 available case */
+               if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
+                       ring = &adev->vce.ring[0];
+                       WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+                       ring = &adev->vce.ring[1];
+                       WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+                       ring = &adev->vce.ring[2];
+                       WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+               }
+
                vce_v3_0_mc_resume(adev, idx);
                WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
index d5f53d04fa08c30a4053aa9fd07b04c795177e4e..83e40fe51b6212f6bbcf44b119c47a86b3fb1638 100644 (file)
@@ -709,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
 
 static struct phm_master_table_item
 vega10_thermal_start_thermal_controller_master_list[] = {
-       {NULL, tf_vega10_thermal_initialize},
-       {NULL, tf_vega10_thermal_set_temperature_range},
-       {NULL, tf_vega10_thermal_enable_alert},
+       { .tableFunction = tf_vega10_thermal_initialize },
+       { .tableFunction = tf_vega10_thermal_set_temperature_range },
+       { .tableFunction = tf_vega10_thermal_enable_alert },
 /* We should restrict performance levels to low before we halt the SMC.
  * On the other hand we are still in boot state when we do this
  * so it would be pointless.
  * If this assumption changes we have to revisit this table.
  */
-       {NULL, tf_vega10_thermal_setup_fan_table},
-       {NULL, tf_vega10_thermal_start_smc_fan_control},
-       {NULL, NULL}
+       { .tableFunction = tf_vega10_thermal_setup_fan_table },
+       { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
+       { }
 };
 
 static struct phm_master_table_header
@@ -731,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = {
 
 static struct phm_master_table_item
 vega10_thermal_set_temperature_range_master_list[] = {
-       {NULL, tf_vega10_thermal_disable_alert},
-       {NULL, tf_vega10_thermal_set_temperature_range},
-       {NULL, tf_vega10_thermal_enable_alert},
-       {NULL, NULL}
+       { .tableFunction = tf_vega10_thermal_disable_alert },
+       { .tableFunction = tf_vega10_thermal_set_temperature_range },
+       { .tableFunction = tf_vega10_thermal_enable_alert },
+       { }
 };
 
 struct phm_master_table_header
index 8be9719284b047f3d6046fd6a22c80bb617fa0f7..aa885a614e27c9882ea597456c4b2e16fb25b62b 100644 (file)
@@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                bool has_connectors =
                        !!new_crtc_state->connector_mask;
 
+               WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
                if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
                        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
                                         crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
        for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
                const struct drm_connector_helper_funcs *funcs = connector->helper_private;
 
+               WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
                /*
                 * This only sets crtc->connectors_changed for routing changes,
                 * drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
 
+               WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+
                funcs = plane->helper_private;
 
                drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
 
        drm_modeset_acquire_init(&ctx, 0);
        while (1) {
+               err = drm_modeset_lock_all_ctx(dev, &ctx);
+               if (err)
+                       goto out;
+
                err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+out:
                if (err != -EDEADLK)
                        break;
 
index 3e5f52110ea17384c84f568ba9b1a4955922523c..213fb837e1c40fe79bf536d54b083d99dee1c192 100644 (file)
@@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
        return 0;
 }
 EXPORT_SYMBOL(drm_dp_stop_crc);
+
+struct dpcd_quirk {
+       u8 oui[3];
+       bool is_branch;
+       u32 quirks;
+};
+
+#define OUI(first, second, third) { (first), (second), (third) }
+
+static const struct dpcd_quirk dpcd_quirk_list[] = {
+       /* Analogix 7737 needs reduced M and N at HBR2 link rates */
+       { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
+};
+
+#undef OUI
+
+/*
+ * Get a bit mask of DPCD quirks for the sink/branch device identified by
+ * ident. The quirk data is shared but it's up to the drivers to act on the
+ * data.
+ *
+ * For now, only the OUI (first three bytes) is used, but this may be extended
+ * to device identification string and hardware/firmware revisions later.
+ */
+static u32
+drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
+{
+       const struct dpcd_quirk *quirk;
+       u32 quirks = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
+               quirk = &dpcd_quirk_list[i];
+
+               if (quirk->is_branch != is_branch)
+                       continue;
+
+               if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
+                       continue;
+
+               quirks |= quirk->quirks;
+       }
+
+       return quirks;
+}
+
+/**
+ * drm_dp_read_desc - read sink/branch descriptor from DPCD
+ * @aux: DisplayPort AUX channel
+ * @desc: Device decriptor to fill from DPCD
+ * @is_branch: true for branch devices, false for sink devices
+ *
+ * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
+ * identification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+                    bool is_branch)
+{
+       struct drm_dp_dpcd_ident *ident = &desc->ident;
+       unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
+       int ret, dev_id_len;
+
+       ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+       if (ret < 0)
+               return ret;
+
+       desc->quirks = drm_dp_get_quirks(ident, is_branch);
+
+       dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
+
+       DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+                     is_branch ? "branch" : "sink",
+                     (int)sizeof(ident->oui), ident->oui,
+                     dev_id_len, ident->device_id,
+                     ident->hw_rev >> 4, ident->hw_rev & 0xf,
+                     ident->sw_major_rev, ident->sw_minor_rev,
+                     desc->quirks);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_read_desc);
index b5c6bb46a4251bdac218832ba61e52109e5a1768..37b8ad3e30d80440aea9ea2654a7a99696b50a57 100644 (file)
@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
 void drm_unplug_dev(struct drm_device *dev)
 {
        /* for a USB device */
-       drm_dev_unregister(dev);
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_modeset_unregister_all(dev);
+
+       drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+       drm_minor_unregister(dev, DRM_MINOR_RENDER);
+       drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 
        mutex_lock(&drm_global_mutex);
 
index 09d3c4c3c858e8a05dcf4cbc9a14feb5910ff132..50294a7bd29da10f99e9dca59701b66c385bb66b 100644 (file)
@@ -82,14 +82,9 @@ err_file_priv_free:
        return ret;
 }
 
-static void exynos_drm_preclose(struct drm_device *dev,
-                                       struct drm_file *file)
-{
-       exynos_drm_subdrv_close(dev, file);
-}
-
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
+       exynos_drm_subdrv_close(dev, file);
        kfree(file->driver_priv);
        file->driver_priv = NULL;
 }
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
                                  | DRIVER_ATOMIC | DRIVER_RENDER,
        .open                   = exynos_drm_open,
-       .preclose               = exynos_drm_preclose,
        .lastclose              = exynos_drm_lastclose,
        .postclose              = exynos_drm_postclose,
        .gem_free_object_unlocked = exynos_drm_gem_free_object,
index cb317693059696b3c86bda9c93c2e5ebcefad921..39c740572034a6d4f3d69f2ab861b1d0a8f80803 100644 (file)
@@ -160,12 +160,9 @@ struct exynos_drm_clk {
  *     drm framework doesn't support multiple irq yet.
  *     we can refer to the crtc to current hardware interrupt occurred through
  *     this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
  */
 struct exynos_drm_crtc {
        struct drm_crtc                 base;
index fc4fda738906251de7a6047c3fc8f3699c469b79..d404de86d5f9de1d5fe07f856fe4a10fafdefd91 100644 (file)
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 {
        struct device *dev = dsi->dev;
        struct device_node *node = dev->of_node;
-       struct device_node *ep;
        int ret;
 
        ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
        if (ret < 0)
                return ret;
 
-       ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
-       if (!ep) {
-               dev_err(dev, "no output port with endpoint specified\n");
-               return -EINVAL;
-       }
-
-       ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+       ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
                                     &dsi->burst_clk_rate);
        if (ret < 0)
-               goto end;
+               return ret;
 
-       ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+       ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
                                     &dsi->esc_clk_rate);
        if (ret < 0)
-               goto end;
-
-       of_node_put(ep);
+               return ret;
 
        dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
        if (!dsi->bridge_node)
                return -EINVAL;
 
-end:
-       of_node_put(ep);
-
-       return ret;
+       return 0;
 }
 
 static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
 static int exynos_dsi_remove(struct platform_device *pdev)
 {
+       struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+       of_node_put(dsi->bridge_node);
+
        pm_runtime_disable(&pdev->dev);
 
        component_del(&pdev->dev, &exynos_dsi_component_ops);
index 5abc69c9630fc28789a32c87b19e44397d1d58ba..f77dcfaade6c5dfb74d7d600d8181d4fbb006f76 100644 (file)
@@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
         * Get the endpoint node. In our case, dsi has one output port1
         * to which the external HDMI bridge is connected.
         */
-       ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge);
+       ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
        if (ret)
                return ret;
 
index dca989eb2d42ed48f6c13c15fe9d3f8a9cbfaab2..24fe04d6307b0383da918308827a12027ded9fbe 100644 (file)
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine;
+       struct intel_vgpu_workload *pos, *n;
+       unsigned int tmp;
+
+       /* free the unsubmited workloads in the queues. */
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+               list_for_each_entry_safe(pos, n,
+                       &vgpu->workload_q_head[engine->id], list) {
+                       list_del_init(&pos->list);
+                       free_workload(pos);
+               }
+       }
+}
+
 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 {
+       clean_workloads(vgpu, ALL_ENGINES);
        kmem_cache_destroy(vgpu->workloads);
 }
 
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
-       struct intel_vgpu_workload *pos, *n;
        unsigned int tmp;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-               /* free the unsubmited workload in the queue */
-               list_for_each_entry_safe(pos, n,
-                       &vgpu->workload_q_head[engine->id], list) {
-                       list_del_init(&pos->list);
-                       free_workload(pos);
-               }
-
+       clean_workloads(vgpu, engine_mask);
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                init_vgpu_execlist(vgpu, engine->id);
-       }
 }
index c995e540ff96e1f8a18a9232de2b26794fa03aa2..0ffd696545927277200d8b2332a024168672c5c5 100644 (file)
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       i915_reg_t reg = {.reg = offset};
+       u32 v = *(u32 *)p_data;
+
+       if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+               return intel_vgpu_default_mmio_write(vgpu,
+                               offset, p_data, bytes);
 
        switch (offset) {
        case 0x4ddc:
-               vgpu_vreg(vgpu, offset) = 0x8000003c;
-               /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
-               I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
                break;
        case 0x42080:
-               vgpu_vreg(vgpu, offset) = 0x8000;
-               /* WaCompressedResourceDisplayNewHashMode:skl */
-               I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+               /* bypass WaCompressedResourceDisplayNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
+               break;
+       case 0xe194:
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
+               break;
+       case 0x7014:
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
                break;
        default:
                return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
-       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+                skl_misc_ctl_write);
        MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x6e570, D_BDW_PLUS);
        MMIO_D(0x65f10, D_BDW_PLUS);
 
-       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+                skl_misc_ctl_write);
        MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
index 3036d4835b0fa7a3b366a31d0b6ed18fc7889ae1..48428672fc6ece0927416d17a8dde8c41f00f500 100644 (file)
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_fini;
 
        pci_set_drvdata(pdev, &dev_priv->drm);
+       /*
+        * Disable the system suspend direct complete optimization, which can
+        * leave the device suspended skipping the driver's suspend handlers
+        * if the device was already runtime suspended. This is needed due to
+        * the difference in our runtime and system suspend sequence and
+        * becaue the HDA driver may require us to enable the audio power
+        * domain during system suspend.
+        */
+       pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 
        ret = i915_driver_init_early(dev_priv, ent);
        if (ret < 0)
@@ -1272,10 +1281,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        dev_priv->ipc_enabled = false;
 
-       /* Everything is in place, we can now relax! */
-       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
-                driver.name, driver.major, driver.minor, driver.patchlevel,
-                driver.date, pci_name(pdev), dev_priv->drm.primary->index);
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
                DRM_INFO("DRM_I915_DEBUG enabled\n");
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
index c9b0949f6c1a2aba281c9a4bbf8d8b2c9ede3785..2c453a4e97d5ba28ca3a21da372f73eed0131268 100644 (file)
@@ -562,7 +562,8 @@ struct intel_link_m_n {
 
 void intel_link_compute_m_n(int bpp, int nlanes,
                            int pixel_clock, int link_clock,
-                           struct intel_link_m_n *m_n);
+                           struct intel_link_m_n *m_n,
+                           bool reduce_m_n);
 
 /* Interface history:
  *
@@ -2990,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
        return false;
 }
 
+static inline bool
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+       if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
+               return true;
+#endif
+       return false;
+}
+
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                                int enable_ppgtt);
 
index b6ac3df18b582534b118ab44aae1dbfe9f75186e..462031cbd77f714b23a3b7645039c0d8dba71f40 100644 (file)
@@ -3298,6 +3298,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
        int ret;
 
+       /* If the device is asleep, we have no requests outstanding */
+       if (!READ_ONCE(i915->gt.awake))
+               return 0;
+
        if (flags & I915_WAIT_LOCKED) {
                struct i915_gem_timeline *tl;
 
index a0563e18d753fd84731f8372efc7a938d2898a6b..f1989b8792dd6f21ba1a944113b424fb8dc3184d 100644 (file)
@@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                gen8_set_pte(&gtt_base[i], scratch_pte);
 }
 
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+       struct drm_i915_private *dev_priv = vm->i915;
+
+       /*
+        * Make sure the internal GAM fifo has been cleared of all GTT
+        * writes before exiting stop_machine(). This guarantees that
+        * any aperture accesses waiting to start in another process
+        * cannot back up behind the GTT writes causing a hang.
+        * The register can be any arbitrary GAM register.
+        */
+       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+       struct i915_address_space *vm;
+       dma_addr_t addr;
+       u64 offset;
+       enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+       struct insert_page *arg = _arg;
+
+       gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+                                         dma_addr_t addr,
+                                         u64 offset,
+                                         enum i915_cache_level level,
+                                         u32 unused)
+{
+       struct insert_page arg = { vm, addr, offset, level };
+
+       stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+       struct i915_address_space *vm;
+       struct sg_table *st;
+       u64 start;
+       enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+       struct insert_entries *arg = _arg;
+
+       gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+                                            struct sg_table *st,
+                                            u64 start,
+                                            enum i915_cache_level level,
+                                            u32 unused)
+{
+       struct insert_entries arg = { vm, st, start, level };
+
+       stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+       struct i915_address_space *vm;
+       u64 start;
+       u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+       struct clear_range *arg = _arg;
+
+       gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+                                         u64 start,
+                                         u64 length)
+{
+       struct clear_range arg = { vm, start, length };
+
+       stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  u64 start, u64 length)
 {
@@ -2313,7 +2408,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
                    appgtt->base.allocate_va_range) {
                        ret = appgtt->base.allocate_va_range(&appgtt->base,
                                                             vma->node.start,
-                                                            vma->node.size);
+                                                            vma->size);
                        if (ret)
                                goto err_pages;
                }
@@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
        ggtt->base.insert_entries = gen8_ggtt_insert_entries;
 
+       /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+       if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+               ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+               ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+               if (ggtt->base.clear_range != nop_clear_range)
+                       ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+       }
+
        ggtt->invalidate = gen6_ggtt_invalidate;
 
        return ggtt_probe_common(ggtt, size);
@@ -2997,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
 
 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 {
-       i915->ggtt.invalidate = gen6_ggtt_invalidate;
+       if (i915->ggtt.invalidate == guc_ggtt_invalidate)
+               i915->ggtt.invalidate = gen6_ggtt_invalidate;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
index 129ed303a6c46e2f856eb1abc84990079abefb65..57d9f7f4ef159cd6eb30f9bc0bd10683eec5123f 100644 (file)
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
                return;
 
        mutex_unlock(&dev->struct_mutex);
-
-       /* expedite the RCU grace period to free some request slabs */
-       synchronize_rcu_expedited();
 }
 
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
                                I915_SHRINK_ACTIVE);
        intel_runtime_pm_put(dev_priv);
 
-       synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
-
        return freed;
 }
 
index a0d6d4317a490bba6487891a4048ddef6b358fe4..fb5231f98c0d620f1ccf03a9872607b1373dc0e2 100644 (file)
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                        obj->mm.quirked = false;
                }
                if (!i915_gem_object_is_tiled(obj)) {
-                       GEM_BUG_ON(!obj->mm.quirked);
+                       GEM_BUG_ON(obj->mm.quirked);
                        __i915_gem_object_pin_pages(obj);
                        obj->mm.quirked = true;
                }
index fd97fe00cd0d2ad00e1c7258eeb51ecf0f60d4c1..190f6aa5d15eb82bf51cbaed00b16ea8c5d4f5bc 100644 (file)
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
        u32 pipestat_mask;
        u32 enable_mask;
        enum pipe pipe;
-       u32 val;
 
        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+               I915_LPE_PIPE_A_INTERRUPT |
+               I915_LPE_PIPE_B_INTERRUPT;
+
        if (IS_CHERRYVIEW(dev_priv))
-               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+                       I915_LPE_PIPE_C_INTERRUPT;
 
        WARN_ON(dev_priv->irq_mask != ~0);
 
-       val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT |
-               I915_LPE_PIPE_C_INTERRUPT);
-
-       enable_mask |= val;
-
        dev_priv->irq_mask = ~enable_mask;
 
        GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
index f87b0c4e564d8b85de91e93f7a8d9a6e6f219b61..1a78363c7f4a9e974edbc4e4f31ec7d64d26b6ad 100644 (file)
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
        GEN5_FEATURES,
        .platform = INTEL_IRONLAKE,
-       .is_mobile = 1,
+       .is_mobile = 1, .has_fbc = 1,
 };
 
 #define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
        .has_hw_contexts = 1, \
        .has_logical_ring_contexts = 1, \
        .has_guc = 1, \
-       .has_decoupled_mmio = 1, \
        .has_aliasing_ppgtt = 1, \
        .has_full_ppgtt = 1, \
        .has_full_48bit_ppgtt = 1, \
index 5a7c63e64381e48a193610305973c468502565d2..65b837e96fe629d58f539b253dc1ab14595a459b 100644 (file)
@@ -8280,7 +8280,7 @@ enum {
 
 /* MIPI DSI registers */
 
-#define _MIPI_PORT(port, a, c) ((port) ? c : a)        /* ports A and C only */
+#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c)    /* ports A and C only */
 #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
 
 #define MIPIO_TXESC_CLK_DIV1                   _MMIO(0x160004)
index 3617927af269afb9872b0d5d419873f0945f880c..569717a1272367a91cf682a9cae7640f9ae32777 100644 (file)
@@ -6101,7 +6101,7 @@ retry:
        pipe_config->fdi_lanes = lane;
 
        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
-                              link_bw, &pipe_config->fdi_m_n);
+                              link_bw, &pipe_config->fdi_m_n, false);
 
        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
@@ -6277,7 +6277,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 }
 
 static void compute_m_n(unsigned int m, unsigned int n,
-                       uint32_t *ret_m, uint32_t *ret_n)
+                       uint32_t *ret_m, uint32_t *ret_n,
+                       bool reduce_m_n)
 {
        /*
         * Reduce M/N as much as possible without loss in precision. Several DP
@@ -6285,9 +6286,11 @@ static void compute_m_n(unsigned int m, unsigned int n,
         * values. The passed in values are more likely to have the least
         * significant bits zero than M after rounding below, so do this first.
         */
-       while ((m & 1) == 0 && (n & 1) == 0) {
-               m >>= 1;
-               n >>= 1;
+       if (reduce_m_n) {
+               while ((m & 1) == 0 && (n & 1) == 0) {
+                       m >>= 1;
+                       n >>= 1;
+               }
        }
 
        *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -6298,16 +6301,19 @@ static void compute_m_n(unsigned int m, unsigned int n,
 void
 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
                       int pixel_clock, int link_clock,
-                      struct intel_link_m_n *m_n)
+                      struct intel_link_m_n *m_n,
+                      bool reduce_m_n)
 {
        m_n->tu = 64;
 
        compute_m_n(bits_per_pixel * pixel_clock,
                    link_clock * nlanes * 8,
-                   &m_n->gmch_m, &m_n->gmch_n);
+                   &m_n->gmch_m, &m_n->gmch_n,
+                   reduce_m_n);
 
        compute_m_n(pixel_clock, link_clock,
-                   &m_n->link_m, &m_n->link_n);
+                   &m_n->link_m, &m_n->link_n,
+                   reduce_m_n);
 }
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -12197,6 +12203,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
+        *
+        * On VLV/CHV DSI the scanline counter would appear to increment
+        * approx. 1/3 of a scanline before start of vblank. Unfortunately
+        * that means we can't tell whether we're in vblank or not while
+        * we're on that particular line. We must still set scanline_offset
+        * to 1 so that the vblank timestamps come out correct when we query
+        * the scanline counter from within the vblank interrupt handler.
+        * However if queried just before the start of vblank we'll get an
+        * answer that's slightly in the future.
         */
        if (IS_GEN2(dev_priv)) {
                const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
index ee77b519835c5fd9d8c582a9c3169b43d06ebab6..fc691b8b317cf3924a98adfb51ca1183a6f2a6b3 100644 (file)
@@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
        DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
-bool
-__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
-{
-       u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
-                                                     DP_SINK_OUI;
-
-       return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
-              sizeof(*desc);
-}
-
-bool intel_dp_read_desc(struct intel_dp *intel_dp)
-{
-       struct intel_dp_desc *desc = &intel_dp->desc;
-       bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
-                      DP_OUI_SUPPORT;
-       int dev_id_len;
-
-       if (!__intel_dp_read_desc(intel_dp, desc))
-               return false;
-
-       dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
-       DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
-                     drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
-                     (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
-                     dev_id_len, desc->device_id,
-                     desc->hw_rev >> 4, desc->hw_rev & 0xf,
-                     desc->sw_major_rev, desc->sw_minor_rev);
-
-       return true;
-}
-
 static int rate_to_index(int find, const int *rates)
 {
        int i = 0;
@@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        int common_rates[DP_MAX_SUPPORTED_RATES] = {};
        int common_len;
        uint8_t link_bw, rate_select;
+       bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+                                          DP_DPCD_QUIRK_LIMITED_M_N);
 
        common_len = intel_dp_common_rates(intel_dp, common_rates);
 
@@ -1753,7 +1724,8 @@ found:
        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
-                              &pipe_config->dp_m_n);
+                              &pipe_config->dp_m_n,
+                              reduce_m_n);
 
        if (intel_connector->panel.downclock_mode != NULL &&
                dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -1761,7 +1733,8 @@ found:
                        intel_link_compute_m_n(bpp, lane_count,
                                intel_connector->panel.downclock_mode->clock,
                                pipe_config->port_clock,
-                               &pipe_config->dp_m2_n2);
+                               &pipe_config->dp_m2_n2,
+                               reduce_m_n);
        }
 
        /*
@@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
        if (!intel_dp_read_dpcd(intel_dp))
                return false;
 
-       intel_dp_read_desc(intel_dp);
+       drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+                        drm_dp_is_branch(intel_dp->dpcd));
 
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 
        intel_dp_print_rates(intel_dp);
 
-       intel_dp_read_desc(intel_dp);
+       drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+                        drm_dp_is_branch(intel_dp->dpcd));
 
        intel_dp_configure_mst(intel_dp);
 
index c1f62eb07c07a7ce49b3ed39e1e6ee2b23eb65e8..989e25577ac0445f9e7632a575a6de38d7f9ec49 100644 (file)
@@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        int lane_count, slots;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int mst_pbn;
+       bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+                                          DP_DPCD_QUIRK_LIMITED_M_N);
 
        pipe_config->has_pch_encoder = false;
        bpp = 24;
@@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
-                              &pipe_config->dp_m_n);
+                              &pipe_config->dp_m_n,
+                              reduce_m_n);
 
        pipe_config->dp_m_n.tu = slots;
 
index aaee3949a42267603a5dfa9deb9be87dd0f7b2b4..f630c7af50205540b64481d9c3ee559fbdfc8f7f 100644 (file)
@@ -906,14 +906,6 @@ enum link_m_n_set {
        M2_N2
 };
 
-struct intel_dp_desc {
-       u8 oui[3];
-       u8 device_id[6];
-       u8 hw_rev;
-       u8 sw_major_rev;
-       u8 sw_minor_rev;
-} __packed;
-
 struct intel_dp_compliance_data {
        unsigned long edid;
        uint8_t video_pattern;
@@ -957,7 +949,7 @@ struct intel_dp {
        /* Max link BW for the sink as per DPCD registers */
        int max_sink_link_bw;
        /* sink or branch descriptor */
-       struct intel_dp_desc desc;
+       struct drm_dp_desc desc;
        struct drm_dp_aux aux;
        enum intel_display_power_domain aux_power_domain;
        uint8_t train_set[4];
@@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 }
 
 bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
-bool __intel_dp_read_desc(struct intel_dp *intel_dp,
-                         struct intel_dp_desc *desc);
-bool intel_dp_read_desc(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
index 854e8e0c836bd2099c1cfcb72e12e3ec5ff21915..f94eacff196c5d0980690ae95cda45c42e3a4e9b 100644 (file)
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        return 0;
 }
 
+static bool ring_is_idle(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       bool idle = true;
+
+       intel_runtime_pm_get(dev_priv);
+
+       /* No bit for gen2, so assume the CS parser is idle */
+       if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+               idle = false;
+
+       intel_runtime_pm_put(dev_priv);
+
+       return idle;
+}
+
 /**
  * intel_engine_is_idle() - Report if the engine has finished process all work
  * @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-
        /* Any inflight/incomplete requests? */
        if (!i915_seqno_passed(intel_engine_get_seqno(engine),
                               intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return false;
 
        /* Ring stopped? */
-       if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+       if (!ring_is_idle(engine))
                return false;
 
        return true;
index ded2add18b26122d7f6395d0d5532da26dd21f34..d93c58410bffe9701d148e546db753dff84c4083 100644 (file)
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
                                            int *width, int *height)
 {
-       int w, h;
-
-       if (drm_rotation_90_or_270(cache->plane.rotation)) {
-               w = cache->plane.src_h;
-               h = cache->plane.src_w;
-       } else {
-               w = cache->plane.src_w;
-               h = cache->plane.src_h;
-       }
-
        if (width)
-               *width = w;
+               *width = cache->plane.src_w;
        if (height)
-               *height = h;
+               *height = cache->plane.src_h;
 }
 
 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
 
        cache->plane.rotation = plane_state->base.rotation;
+       /*
+        * Src coordinates are already rotated by 270 degrees for
+        * the 90/270 degree plane rotation cases (to match the
+        * GTT mapping), hence no need to account for rotation here.
+        */
        cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
        cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
        cache->plane.visible = plane_state->base.visible;
index 668f00480d97c0ff0418c19dfaaffec31fc65341..292fedf30b0010c33e1eefd8f643b1b87bd38edd 100644 (file)
@@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
 
 static void lpe_audio_irq_unmask(struct irq_data *d)
 {
-       struct drm_i915_private *dev_priv = d->chip_data;
-       unsigned long irqflags;
-       u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT);
-
-       if (IS_CHERRYVIEW(dev_priv))
-               val |= I915_LPE_PIPE_C_INTERRUPT;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-       dev_priv->irq_mask &= ~val;
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       POSTING_READ(VLV_IMR);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static void lpe_audio_irq_mask(struct irq_data *d)
 {
-       struct drm_i915_private *dev_priv = d->chip_data;
-       unsigned long irqflags;
-       u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT);
-
-       if (IS_CHERRYVIEW(dev_priv))
-               val |= I915_LPE_PIPE_C_INTERRUPT;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-       dev_priv->irq_mask |= val;
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IIR, val);
-       POSTING_READ(VLV_IIR);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static struct irq_chip lpe_audio_irqchip = {
@@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 
        desc = irq_to_desc(dev_priv->lpe_audio.irq);
 
-       lpe_audio_irq_mask(&desc->irq_data);
-
        lpe_audio_platdev_destroy(dev_priv);
 
        irq_free_desc(dev_priv->lpe_audio.irq);
index c8f7c631fc1f8e354cac0038c80aa35d0a1dd0d2..dac4e003c1f317ec402110132bad0c3a734bf52a 100644 (file)
@@ -1989,7 +1989,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
        ce->ring = ring;
        ce->state = vma;
-       ce->initialised = engine->init_context == NULL;
+       ce->initialised |= engine->init_context == NULL;
 
        return 0;
 
index 71cbe9c089320cbc305c827bacd41fcbf1e542ce..5abef482eacf1b24780edea4c40ab7e593a42dc6 100644 (file)
@@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
                return false;
        }
 
-       intel_dp_read_desc(dp);
+       drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
 
        DRM_DEBUG_KMS("Success: LSPCON init\n");
        return true;
index 570bd603f401d513ac3f08c67fc78d6d1523b762..2ca481b5aa691872d39263605ef67b9c7335cec6 100644 (file)
@@ -4335,10 +4335,18 @@ skl_compute_wm(struct drm_atomic_state *state)
        struct drm_crtc_state *cstate;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct skl_wm_values *results = &intel_state->wm_results;
+       struct drm_device *dev = state->dev;
        struct skl_pipe_wm *pipe_wm;
        bool changed = false;
        int ret, i;
 
+       /*
+        * When we distrust bios wm we always need to recompute to set the
+        * expected DDB allocations for each CRTC.
+        */
+       if (to_i915(dev)->wm.distrust_bios_wm)
+               changed = true;
+
        /*
         * If this transaction isn't actually touching any CRTC's, don't
         * bother with watermark calculation.  Note that if we pass this
@@ -4349,6 +4357,7 @@ skl_compute_wm(struct drm_atomic_state *state)
         */
        for_each_new_crtc_in_state(state, crtc, cstate, i)
                changed = true;
+
        if (!changed)
                return 0;
 
index c3780d0d2baf752ce9d590b6f6c8db67674ec745..559f1ab42bfc23e005020d9bb3cb88e0f0d57943 100644 (file)
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
        }
 
        /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
-       if (intel_crtc->config->pipe_src_w > 3200 ||
-                               intel_crtc->config->pipe_src_h > 2000) {
+       if (dev_priv->psr.psr2_support &&
+           (intel_crtc->config->pipe_src_w > 3200 ||
+            intel_crtc->config->pipe_src_h > 2000)) {
                dev_priv->psr.psr2_support = false;
                return false;
        }
index 8c87c717c7cda92c4256cf277828e594f96a0ad1..e6517edcd16b55608c125452b56904f2b48e90df 100644 (file)
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  */
 void intel_pipe_update_start(struct intel_crtc *crtc)
 {
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        long timeout = msecs_to_jiffies_timeout(1);
        int scanline, min, max, vblank_start;
        wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+       bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+               intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
        DEFINE_WAIT(wait);
 
        vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
 
        drm_crtc_vblank_put(&crtc->base);
 
+       /*
+        * On VLV/CHV DSI the scanline counter would appear to
+        * increment approx. 1/3 of a scanline before start of vblank.
+        * The registers still get latched at start of vblank however.
+        * This means we must not write any registers on the first
+        * line of vblank (since not the whole line is actually in
+        * vblank). And unfortunately we can't use the interrupt to
+        * wait here since it will fire too soon. We could use the
+        * frame start interrupt instead since it will fire after the
+        * critical scanline, but that would require more changes
+        * in the interrupt code. So for now we'll just do the nasty
+        * thing and poll for the bad scanline to pass us by.
+        *
+        * FIXME figure out if BXT+ DSI suffers from this as well
+        */
+       while (need_vlv_dsi_wa && scanline == vblank_start)
+               scanline = intel_get_crtc_scanline(crtc);
+
        crtc->debug.scanline_start = scanline;
        crtc->debug.start_vbl_time = ktime_get();
        crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
index 4b7f73aeddac6475db31d184853f833c8ba3d510..f84115261ae78b02591a64cb77de96c3fda167bb 100644 (file)
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
  *                available in the work queue (note, the queue is shared,
  *                not per-engine). It is OK for this to be nonzero, but
  *                it should not be huge!
- *   q_fail: failed to enqueue a work item. This should never happen,
- *           because we check for space beforehand.
  *   b_fail: failed to ring the doorbell. This should never happen, unless
  *           somehow the hardware misbehaves, or maybe if the GuC firmware
  *           crashes? We probably need to reset the GPU to recover.
index 1afb8b06e3e19bf23ed287277415afb364504b23..12b85b3278cd1cfc53b159253e9152e3d8f1784b 100644 (file)
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
 static int igt_ctx_exec(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_gem_object *obj = NULL;
        struct drm_file *file;
        IGT_TIMEOUT(end_time);
        LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
                }
 
                for_each_engine(engine, i915, id) {
-                       if (dw == 0) {
+                       if (!obj) {
                                obj = create_test_object(ctx, file, &objects);
                                if (IS_ERR(obj)) {
                                        err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
                                goto out_unlock;
                        }
 
-                       if (++dw == max_dwords(obj))
+                       if (++dw == max_dwords(obj)) {
+                               obj = NULL;
                                dw = 0;
+                       }
                        ndwords++;
                }
                ncontexts++;
index 8fb801fab039b10225765b044a4e535cf7a4201d..8b05ecb8fdefccafeed07755d501e8902ccba0c3 100644 (file)
@@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                ret = drm_of_find_panel_or_bridge(child,
                                                  imx_ldb->lvds_mux ? 4 : 2, 0,
                                                  &channel->panel, &channel->bridge);
-               if (ret)
+               if (ret && ret != -ENODEV)
                        return ret;
 
                /* panel ddc only if there is no bridge */
index 808b995a990f5529b303e23cb1085b4b7f478355..b5cc6e12334cf96e8faacc01a1a8fb5dcec48202 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_of.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
 
 static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
 {
-       u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
-
-       while (timeout_ms--) {
-               if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
-                       break;
-
-               usleep_range(2, 4);
-       }
+       int ret;
+       u32 val;
 
-       if (timeout_ms == 0) {
+       ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
+                                4, 2000000);
+       if (ret) {
                DRM_WARN("polling dsi wait not busy timeout!\n");
 
                mtk_dsi_enable(dsi);
index 41a1c03b03476b620a511731518b8d0f7772417d..0a4ffd7241468dcbd064fa3a210f17094d10697b 100644 (file)
@@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
        }
 
        err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
-       if (err) {
+       if (err < 0) {
                dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
                        err);
                return err;
index 75382f5f0fcec00a8749df932cfd7dba9eb19542..10b227d83e9ac7af98b8177188bb56d48823f2b1 100644 (file)
@@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = {
        .max_register   = 0x1000,
 };
 
-static int meson_drv_bind(struct device *dev)
+static int meson_drv_bind_master(struct device *dev, bool has_components)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct meson_drm *priv;
@@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev)
        if (ret)
                goto free_drm;
 
-       ret = component_bind_all(drm->dev, drm);
-       if (ret) {
-               dev_err(drm->dev, "Couldn't bind all components\n");
-               goto free_drm;
+       if (has_components) {
+               ret = component_bind_all(drm->dev, drm);
+               if (ret) {
+                       dev_err(drm->dev, "Couldn't bind all components\n");
+                       goto free_drm;
+               }
        }
 
        ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@ free_drm:
        return ret;
 }
 
+static int meson_drv_bind(struct device *dev)
+{
+       return meson_drv_bind_master(dev, true);
+}
+
 static void meson_drv_unbind(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev)
                count += meson_probe_remote(pdev, &match, np, remote);
        }
 
+       if (count && !match)
+               return meson_drv_bind_master(&pdev->dev, false);
+
        /* If some endpoints were found, initialize the nodes */
        if (count) {
                dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
index 5b8e23d051f2f3752a180df4abedccefcfebc3ed..0a31cd6d01ce145f3f112b8c19d17dcdd46ea524 100644 (file)
@@ -13,6 +13,7 @@ config DRM_MSM
        select QCOM_SCM
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
+       select PM_OPP
        default y
        help
          DRM/KMS driver for MSM/snapdragon.
index f8f48d014978c0ccd5cc9ffcc3d699b86f779399..9c34d7824988654ab2f8366741724da8ac18b82a 100644 (file)
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
        return 0;
 }
 
-static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
        .map = mdss_hw_irqdomain_map,
        .xlate = irq_domain_xlate_onecell,
 };
index a38c5fe6cc19752a9832618af1a4f146bab4a8df..7d3741215387110bb7f7ad622cb54d0d411fb9f1 100644 (file)
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
 
        mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
                        sizeof(*mdp5_state), GFP_KERNEL);
+       if (!mdp5_state)
+               return NULL;
 
-       if (mdp5_state && mdp5_state->base.fb)
-               drm_framebuffer_reference(mdp5_state->base.fb);
+       __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
 
        return &mdp5_state->base;
 }
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
                        mdp5_pipe_release(state->state, old_hwpipe);
                        mdp5_pipe_release(state->state, old_right_hwpipe);
                }
+       } else {
+               mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+               mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+               mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
        }
 
        return 0;
index 87b5695d4034df0e118475617167ee990ccfc490..9d498eb81906220705d85c57260cd4b1f82fa1fc 100644 (file)
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export   = drm_gem_prime_export,
        .gem_prime_import   = drm_gem_prime_import,
+       .gem_prime_res_obj  = msm_gem_prime_res_obj,
        .gem_prime_pin      = msm_gem_prime_pin,
        .gem_prime_unpin    = msm_gem_prime_unpin,
        .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
index 28b6f9ba50664509bb44fa2b5704d3bcc86a67af..1b26ca626528ab5f4435f689d0a213f0e672aaa5 100644 (file)
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
                struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
index 3f299c537b77ae347bce4c92368ed774c7695459..a2f89bac9c160674f5f103f75a04a7a92e7c5b99 100644 (file)
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 }
 
 struct msm_fence {
-       struct msm_fence_context *fctx;
        struct dma_fence base;
+       struct msm_fence_context *fctx;
 };
 
 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
        return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct dma_fence *fence)
-{
-       struct msm_fence *f = to_msm_fence(fence);
-       kfree_rcu(f, base.rcu);
-}
-
 static const struct dma_fence_ops msm_fence_ops = {
        .get_driver_name = msm_fence_get_driver_name,
        .get_timeline_name = msm_fence_get_timeline_name,
        .enable_signaling = msm_fence_enable_signaling,
        .signaled = msm_fence_signaled,
        .wait = dma_fence_default_wait,
-       .release = msm_fence_release,
+       .release = dma_fence_free,
 };
 
 struct dma_fence *
index 68e509b3b9e4d08730e3901f46a397519c33e77c..50289a23baf8df27c4bc1aebf067da2b011b8f28 100644 (file)
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
        struct msm_gem_object *msm_obj;
        bool use_vram = false;
 
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
        size = PAGE_ALIGN(dmabuf->size);
 
+       /* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+       mutex_lock(&dev->struct_mutex);
        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+       mutex_unlock(&dev->struct_mutex);
+
        if (ret)
                goto fail;
 
index 60bb290700cef9c32fc2ca0dd2db229a6a7ffedf..13403c6da6c75012fa5f17f4b0b63075ddf20874 100644 (file)
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
        if (!obj->import_attach)
                msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       return msm_obj->resv;
+}
index 1c545ebe6a5a0f875a995b1db3a1204b0b21d57e..7832e6421d250d0bd78400057e46dce07dc2d18c 100644 (file)
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                if (!in_fence)
                        return -EINVAL;
 
-               /* TODO if we get an array-fence due to userspace merging multiple
-                * fences, we need a way to determine if all the backing fences
-                * are from our own context..
+               /*
+                * Wait if the fence is from a foreign context, or if the fence
+                * array contains any fence from a foreign context.
                 */
-
-               if (in_fence->context != gpu->fctx->context) {
+               if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
                        ret = dma_fence_wait(in_fence, true);
                        if (ret)
                                return ret;
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                        goto out;
                }
 
-               if ((submit_cmd.size + submit_cmd.submit_offset) >=
-                               msm_obj->base.size) {
+               if (!submit_cmd.size ||
+                       ((submit_cmd.size + submit_cmd.submit_offset) >
+                               msm_obj->base.size)) {
                        DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
index 97b9c38c6b3ff7e05adf9ea84e8328d19e32b90f..0fdc88d79ca87b3a54709aa4d527db80ca0997dc 100644 (file)
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
                gpu->grp_clks[i] = get_clock(dev, name);
 
                /* Remember the key clocks that we need to control later */
-               if (!strcmp(name, "core"))
+               if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
                        gpu->core_clk = gpu->grp_clks[i];
-               else if (!strcmp(name, "rbbmtimer"))
+               else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
                        gpu->rbbmtimer_clk = gpu->grp_clks[i];
 
                ++i;
index 6a567fe347b369a2c01d5e89c67ac0a881a49420..820a4805916f1da8115b798cf3c93d5750ae8196 100644 (file)
@@ -4,6 +4,7 @@
 
 struct nvkm_alarm {
        struct list_head head;
+       struct list_head exec;
        u64 timestamp;
        void (*func)(struct nvkm_alarm *);
 };
index 36268e1802b5afcd65c6b3d623b273c4ac60af87..15a13d09d431c9a8d4822fb5f997bcf3225a2d4c 100644 (file)
@@ -80,7 +80,7 @@ int nouveau_modeset = -1;
 module_param_named(modeset, nouveau_modeset, int, 0400);
 
 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
-int nouveau_runtime_pm = -1;
+static int nouveau_runtime_pm = -1;
 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
 
 static struct drm_driver driver_stub;
@@ -495,7 +495,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        nouveau_fbcon_init(dev);
        nouveau_led_init(dev);
 
-       if (nouveau_runtime_pm != 0) {
+       if (nouveau_pmops_runtime()) {
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
                pm_runtime_set_active(dev->dev);
@@ -527,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
 
-       if (nouveau_runtime_pm != 0) {
+       if (nouveau_pmops_runtime()) {
                pm_runtime_get_sync(dev->dev);
                pm_runtime_forbid(dev->dev);
        }
@@ -726,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev)
        return nouveau_do_resume(drm_dev, false);
 }
 
+bool
+nouveau_pmops_runtime()
+{
+       if (nouveau_runtime_pm == -1)
+               return nouveau_is_optimus() || nouveau_is_v1_dsm();
+       return nouveau_runtime_pm == 1;
+}
+
 static int
 nouveau_pmops_runtime_suspend(struct device *dev)
 {
@@ -733,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        int ret;
 
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+       if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
@@ -765,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
        struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
        int ret;
 
-       if (nouveau_runtime_pm == 0)
-               return -EINVAL;
+       if (!nouveau_pmops_runtime()) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
@@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev)
        struct nouveau_drm *drm = nouveau_drm(drm_dev);
        struct drm_crtc *crtc;
 
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+       if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
index eadec2f49ad318cf44d3464ff39dbe201e7074cb..a11b6aaed325f17ddf6f82c8fd8ced531191574a 100644 (file)
@@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv)
 #include <nvif/object.h>
 #include <nvif/device.h>
 
-extern int nouveau_runtime_pm;
-
 struct nouveau_drm {
        struct nouveau_cli client;
        struct drm_device *dev;
@@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev)
 
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
+bool nouveau_pmops_runtime(void);
 
 #include <nvkm/core/tegra.h>
 
index a4aacbc0cec8efe603d18152099aaecdeab93dce..02fe0efb9e1643f3a4802b947b3ab306bdd690bd 100644 (file)
@@ -87,7 +87,7 @@ void
 nouveau_vga_init(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
-       bool runtime = false;
+       bool runtime = nouveau_pmops_runtime();
 
        /* only relevant for PCI devices */
        if (!dev->pdev)
@@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm)
        if (pci_is_thunderbolt_attached(dev->pdev))
                return;
 
-       if (nouveau_runtime_pm == 1)
-               runtime = true;
-       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-               runtime = true;
        vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
 
        if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@ void
 nouveau_vga_fini(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
-       bool runtime = false;
+       bool runtime = nouveau_pmops_runtime();
 
        vga_client_register(dev->pdev, NULL, NULL, NULL);
 
        if (pci_is_thunderbolt_attached(dev->pdev))
                return;
 
-       if (nouveau_runtime_pm == 1)
-               runtime = true;
-       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-               runtime = true;
-
        vga_switcheroo_unregister_client(dev->pdev);
        if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
                vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
index a7663249b3baf2df1c5c75d87d3b32109984ba97..06e564a9ccb253b3018b45d2f0a96cc53430ab2c 100644 (file)
@@ -2107,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
                                        asyc->set.dither = true;
                        }
                } else {
-                       asyc->set.mask = ~0;
+                       if (asyc)
+                               asyc->set.mask = ~0;
                        asyh->set.mask = ~0;
                }
 
index f2a86eae0a0d624b31cb8ee9a65e6487705a6c1a..2437f7d41ca20de616a7193f2d6fdec6d7daea00 100644 (file)
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
                /* Move to completed list.  We'll drop the lock before
                 * executing the callback so it can reschedule itself.
                 */
-               list_move_tail(&alarm->head, &exec);
+               list_del_init(&alarm->head);
+               list_add(&alarm->exec, &exec);
        }
 
        /* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
        spin_unlock_irqrestore(&tmr->lock, flags);
 
        /* Execute completed callbacks. */
-       list_for_each_entry_safe(alarm, atemp, &exec, head) {
-               list_del_init(&alarm->head);
+       list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+               list_del(&alarm->exec);
                alarm->func(alarm);
        }
 }
index d8fa7a9c9240bdf53f06d214f5952af7f85e5ee4..ce5f2d1f9994113b6322a708f47f1e23049ef3ba 100644 (file)
@@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
                                      struct drm_connector_state *conn_state)
 {
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
-       struct rockchip_dp_device *dp = to_dp(encoder);
-       int ret;
 
        /*
         * The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 
        s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
        s->output_type = DRM_MODE_CONNECTOR_eDP;
-       if (dp->data->chip_type == RK3399_EDP) {
-               /*
-                * For RK3399, VOP Lit must code the out mode to RGB888,
-                * VOP Big must code the out mode to RGB10.
-                */
-               ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
-                                                       encoder);
-               if (ret > 0)
-                       s->output_mode = ROCKCHIP_OUT_MODE_P888;
-       }
 
        return 0;
 }
index a2169dd3d26b915c851bd089f25373495c188174..14fa1f8351e8df22ab30560fbb6a1906841ba43d 100644 (file)
@@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 {
        struct cdn_dp_device *dp = encoder_to_dp(encoder);
        int ret, val;
-       struct rockchip_crtc_state *state;
 
        ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
        if (ret < 0) {
@@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 
        DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
                          (ret) ? "LIT" : "BIG");
-       state = to_rockchip_crtc_state(encoder->crtc->state);
-       if (ret) {
+       if (ret)
                val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
-               state->output_mode = ROCKCHIP_OUT_MODE_P888;
-       } else {
+       else
                val = DP_SEL_VOP_LIT << 16;
-               state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
-       }
 
        ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
        if (ret)
index 3f7a82d1e0956e6a37e1478412210955db38aa19..45589d6ce65ed0fd0a7e1be60f83dd03bd3d47b5 100644 (file)
@@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 static void vop_crtc_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
+       const struct vop_data *vop_data = vop->data;
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
        struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
        u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
                DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
                              s->output_type);
        }
+
+       /*
+        * if vop is not support RGB10 output, need force RGB10 to RGB888.
+        */
+       if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
+           !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
+               s->output_mode = ROCKCHIP_OUT_MODE_P888;
        VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
        VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
index 5a4faa85dbd29d91af08ae51a8c2ac69012cb33c..9979fd0c22821d7efa3d7054468e0914619e0692 100644 (file)
@@ -142,6 +142,9 @@ struct vop_data {
        const struct vop_intr *intr;
        const struct vop_win_data *win;
        unsigned int win_size;
+
+#define VOP_FEATURE_OUTPUT_RGB10       BIT(0)
+       u64 feature;
 };
 
 /* interrupt define */
index 0da44442aab097b8f4b40d67c8995be625bccfcd..bafd698a28b1b491c01823d2be293a41e67c3722 100644 (file)
@@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = {
 static const struct vop_data rk3288_vop = {
        .init_table = rk3288_init_reg_table,
        .table_size = ARRAY_SIZE(rk3288_init_reg_table),
+       .feature = VOP_FEATURE_OUTPUT_RGB10,
        .intr = &rk3288_vop_intr,
        .ctrl = &rk3288_ctrl_data,
        .win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = {
 static const struct vop_data rk3399_vop_big = {
        .init_table = rk3399_init_reg_table,
        .table_size = ARRAY_SIZE(rk3399_init_reg_table),
+       .feature = VOP_FEATURE_OUTPUT_RGB10,
        .intr = &rk3399_vop_intr,
        .ctrl = &rk3399_ctrl_data,
        /*
index 130d51c5ec6a2dab1211337d71313e6b1de15323..4b948fba9eec274b794a0546fec0179b3de1cadf 100644 (file)
@@ -41,9 +41,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20170221"
+#define VMWGFX_DRIVER_DATE "20170607"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 12
+#define VMWGFX_DRIVER_MINOR 13
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
index b6a0806b06bffaf6da9178905f9b2f6bb037d384..a1c68e6a689e32fd0dd4d74c805ee4afd0836a99 100644 (file)
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
+                               if (!fifo_state->dynamic_buffer)
+                                       goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
index ef9f3a2a40303290287b5259b7a71d2a8791ddb4..1d2db5d912b03c572b50f9b64b2f5d2a39de1365 100644 (file)
@@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 }
 
 
-
-/**
- * vmw_du_cursor_plane_update() - Update cursor image and location
- *
- * @plane: plane object to update
- * @crtc: owning CRTC of @plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of plane on crtc
- * @crtc_y: y offset of plane on crtc
- * @crtc_w: width of plane rectangle on crtc
- * @crtc_h: height of plane rectangle on crtc
- * @src_x: Not used
- * @src_y: Not used
- * @src_w: Not used
- * @src_h: Not used
- *
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int crtc_x, int crtc_y,
-                              unsigned int crtc_w,
-                              unsigned int crtc_h,
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h)
-{
-       struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-       struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-       struct vmw_surface *surface = NULL;
-       struct vmw_dma_buffer *dmabuf = NULL;
-       s32 hotspot_x, hotspot_y;
-       int ret;
-
-       hotspot_x = du->hotspot_x + fb->hot_x;
-       hotspot_y = du->hotspot_y + fb->hot_y;
-
-       /* A lot of the code assumes this */
-       if (crtc_w != 64 || crtc_h != 64) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (vmw_framebuffer_to_vfb(fb)->dmabuf)
-               dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
-       else
-               surface = vmw_framebuffer_to_vfbs(fb)->surface;
-
-       if (surface && !surface->snooper.image) {
-               DRM_ERROR("surface not suitable for cursor\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* setup new image */
-       ret = 0;
-       if (surface) {
-               /* vmw_user_surface_lookup takes one reference */
-               du->cursor_surface = surface;
-
-               du->cursor_age = du->cursor_surface->snooper.age;
-
-               ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
-                                             64, 64, hotspot_x, hotspot_y);
-       } else if (dmabuf) {
-               /* vmw_user_surface_lookup takes one reference */
-               du->cursor_dmabuf = dmabuf;
-
-               ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
-                                              hotspot_x, hotspot_y);
-       } else {
-               vmw_cursor_update_position(dev_priv, false, 0, 0);
-               goto out;
-       }
-
-       if (!ret) {
-               du->cursor_x = crtc_x + du->set_gui_x;
-               du->cursor_y = crtc_y + du->set_gui_y;
-
-               vmw_cursor_update_position(dev_priv, true,
-                                          du->cursor_x + hotspot_x,
-                                          du->cursor_y + hotspot_y);
-       }
-
-out:
-       return ret;
-}
-
-
-int vmw_du_cursor_plane_disable(struct drm_plane *plane)
-{
-       if (plane->fb) {
-               drm_framebuffer_unreference(plane->fb);
-               plane->fb = NULL;
-       }
-
-       return -EINVAL;
-}
-
-
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 {
        vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -472,18 +370,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 }
 
 
-void
-vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-                                  struct drm_plane_state *old_state)
-{
-       struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
-       struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-
-       drm_atomic_set_fb_for_plane(plane->state, NULL);
-       vmw_cursor_update_position(dev_priv, false, 0, 0);
-}
-
-
 void
 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                  struct drm_plane_state *old_state)
@@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
         */
        if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
            dmabuf && only_2d &&
+           mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
            dev_priv->active_display_unit == vmw_du_screen_target) {
                ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
                                              dmabuf, &surface);
index 13f2f1d2818a755012098df126938989ba1fb297..5f8d678ae675156178dc306efc2fc83338c390ba 100644 (file)
@@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
                           u16 *r, u16 *g, u16 *b,
                           uint32_t size,
                           struct drm_modeset_acquire_ctx *ctx);
-int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
-                           uint32_t handle, uint32_t width, uint32_t height,
-                           int32_t hot_x, int32_t hot_y);
-int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
 int vmw_du_connector_set_property(struct drm_connector *connector,
                                  struct drm_property *property,
                                  uint64_t val);
@@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
 /* Universal Plane Helpers */
 void vmw_du_primary_plane_destroy(struct drm_plane *plane);
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
-int vmw_du_cursor_plane_disable(struct drm_plane *plane);
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int crtc_x, int crtc_y,
-                              unsigned int crtc_w,
-                              unsigned int crtc_h,
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h);
 
 /* Atomic Helpers */
 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_plane_state *state);
 void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                       struct drm_plane_state *old_state);
-void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-                                       struct drm_plane_state *old_state);
 int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
                                   struct drm_plane_state *new_state);
 void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
index bad31bdf09b6c1d8bd31663c7973e1ebb912f340..50be1f034f9efa701f2c6feda57fe28d8cf6d596 100644 (file)
@@ -56,6 +56,8 @@ enum stdu_content_type {
  * @right: Right side of bounding box.
  * @top: Top side of bounding box.
  * @bottom: Bottom side of bounding box.
+ * @fb_left: Left side of the framebuffer/content bounding box
+ * @fb_top: Top of the framebuffer/content bounding box
  * @buf: DMA buffer when DMA-ing between buffer and screen targets.
  * @sid: Surface ID when copying between surface and screen targets.
  */
@@ -63,6 +65,7 @@ struct vmw_stdu_dirty {
        struct vmw_kms_dirty base;
        SVGA3dTransferType  transfer;
        s32 left, right, top, bottom;
+       s32 fb_left, fb_top;
        u32 pitch;
        union {
                struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
  *
  * @dirty: The closure structure.
  *
- * This function calculates the bounding box for all the incoming clips
+ * This function calculates the bounding box for all the incoming clips.
  */
 static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 {
@@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 
        dirty->num_hits = 1;
 
-       /* Calculate bounding box */
+       /* Calculate destination bounding box */
        ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
        ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
        ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
        ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+
+       /*
+        * Calculate content bounding box.  We only need the top-left
+        * coordinate because width and height will be the same as the
+        * destination bounding box above
+        */
+       ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
+       ddirty->fb_top  = min_t(s32, ddirty->fb_top, dirty->fb_y);
 }
 
 
@@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
        src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
        src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
-       src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp;
+       src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
        dst_pitch = ddirty->pitch;
        dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
-       dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp;
+       dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
 
        /* Figure out the real direction */
@@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        }
 
 out_cleanup:
-       ddirty->left = ddirty->top = S32_MAX;
+       ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
        ddirty->right = ddirty->bottom = S32_MIN;
 }
 
@@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
                SVGA3D_READ_HOST_VRAM;
        ddirty.left = ddirty.top = S32_MAX;
        ddirty.right = ddirty.bottom = S32_MIN;
+       ddirty.fb_left = ddirty.fb_top = S32_MAX;
        ddirty.pitch = vfb->base.pitches[0];
        ddirty.buf = buf;
        ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
                DRM_ERROR("Failed to bind surface to STDU.\n");
        else
                crtc->primary->fb = plane->state->fb;
+
+       ret = vmw_stdu_update_st(dev_priv, stdu);
+
+       if (ret)
+               DRM_ERROR("Failed to update STDU.\n");
 }
 
 
index 7681341fe32b8725840d70b137782f5f1f316bc0..6b70bd259953580204ccecd4ec4334c73e73eed7 100644 (file)
@@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint32_t size;
-       uint32_t backup_handle;
+       uint32_t backup_handle = 0;
 
        if (req->multisample_count != 0)
                return -EINVAL;
 
+       if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+               return -EINVAL;
+
        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;
@@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
                                             &res->backup,
                                             &user_srf->backup_base);
-               if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
-                   res->backup_size) {
-                       DRM_ERROR("Surface backup buffer is too small.\n");
-                       vmw_dmabuf_unreference(&res->backup);
-                       ret = -EINVAL;
-                       goto out_unlock;
+               if (ret == 0) {
+                       if (res->backup->base.num_pages * PAGE_SIZE <
+                           res->backup_size) {
+                               DRM_ERROR("Surface backup buffer is too small.\n");
+                               vmw_dmabuf_unreference(&res->backup);
+                               ret = -EINVAL;
+                               goto out_unlock;
+                       } else {
+                               backup_handle = req->buffer_handle;
+                       }
                }
        } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
                                 dev_priv->stdu_max_height);
 
                if (size.width > max_width || size.height > max_height) {
-                       DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u",
+                       DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
                                  size.width, size.height,
                                  max_width, max_height);
                        return -EINVAL;
index 16d556816b5fcaa62758549d9bceaa88bd4bc839..2fb5f432a54c1afd0f7c6104facb1860dbcb3f3a 100644 (file)
@@ -725,15 +725,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
        spin_lock_irqsave(&ipu->lock, flags);
 
        val = ipu_cm_read(ipu, IPU_CONF);
-       if (vdi) {
+       if (vdi)
                val |= IPU_CONF_IC_INPUT;
-       } else {
+       else
                val &= ~IPU_CONF_IC_INPUT;
-               if (csi_id == 1)
-                       val |= IPU_CONF_CSI_SEL;
-               else
-                       val &= ~IPU_CONF_CSI_SEL;
-       }
+
+       if (csi_id == 1)
+               val |= IPU_CONF_CSI_SEL;
+       else
+               val &= ~IPU_CONF_CSI_SEL;
+
        ipu_cm_write(ipu, val, IPU_CONF);
 
        spin_unlock_irqrestore(&ipu->lock, flags);
index c55563379e2e3ca2a1957ce777b2ac2c3586d9b7..c35f74c830657f26a3e29c34f7cef7e9f864f71a 100644 (file)
@@ -131,8 +131,6 @@ int ipu_pre_get(struct ipu_pre *pre)
        if (pre->in_use)
                return -EBUSY;
 
-       clk_prepare_enable(pre->clk_axi);
-
        /* first get the engine out of reset and remove clock gating */
        writel(0, pre->regs + IPU_PRE_CTRL);
 
@@ -149,12 +147,7 @@ int ipu_pre_get(struct ipu_pre *pre)
 
 void ipu_pre_put(struct ipu_pre *pre)
 {
-       u32 val;
-
-       val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
-       writel(val, pre->regs + IPU_PRE_CTRL);
-
-       clk_disable_unprepare(pre->clk_axi);
+       writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
 
        pre->in_use = false;
 }
@@ -249,6 +242,8 @@ static int ipu_pre_probe(struct platform_device *pdev)
        if (!pre->buffer_virt)
                return -ENOMEM;
 
+       clk_prepare_enable(pre->clk_axi);
+
        pre->dev = dev;
        platform_set_drvdata(pdev, pre);
        mutex_lock(&ipu_pre_list_mutex);
@@ -268,6 +263,8 @@ static int ipu_pre_remove(struct platform_device *pdev)
        available_pres--;
        mutex_unlock(&ipu_pre_list_mutex);
 
+       clk_disable_unprepare(pre->clk_axi);
+
        if (pre->buffer_virt)
                gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
                              IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
index fe40e5e499dd4122ce0f623d9df776c0a0cf2bdf..687705c5079422a82b9977e5694d0cd45b2a47bc 100644 (file)
@@ -275,10 +275,12 @@ config HID_EMS_FF
         - Trio Linker Plus II
 
 config HID_ELECOM
-       tristate "ELECOM BM084 bluetooth mouse"
+       tristate "ELECOM HID devices"
        depends on HID
        ---help---
-       Support for the ELECOM BM084 (bluetooth mouse).
+       Support for ELECOM devices:
+         - BM084 Bluetooth Mouse
+         - DEFT Trackball (Wired and wireless)
 
 config HID_ELO
        tristate "ELO USB 4000/4500 touchscreen"
index 16df6cc902359ea620de3f079be796d32be4783a..a6268f2f7408a520660c6add3c734a8006474393 100644 (file)
@@ -69,6 +69,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_IS_MULTITOUCH            BIT(3)
 #define QUIRK_NO_CONSUMER_USAGES       BIT(4)
 #define QUIRK_USE_KBD_BACKLIGHT                BIT(5)
+#define QUIRK_T100_KEYBOARD            BIT(6)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -536,6 +537,8 @@ static void asus_remove(struct hid_device *hdev)
                drvdata->kbd_backlight->removed = true;
                cancel_work_sync(&drvdata->kbd_backlight->work);
        }
+
+       hid_hw_stop(hdev);
 }
 
 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -548,6 +551,12 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
                rdesc[55] = 0xdd;
        }
+       if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
+                *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+               hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
+               rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
+       }
+
        return rdesc;
 }
 
@@ -560,6 +569,9 @@ static const struct hid_device_id asus_devices[] = {
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+               USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+         QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
        { }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
index 37084b6457851ebe0d52361e327db9df07b86b2c..04cee65531d761c18e53775ffc784c3c3d993daa 100644 (file)
@@ -1855,6 +1855,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1891,6 +1892,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
index 6e3848a8d8dd1416a0091ce0e7d263325dd8910b..e2c7465df69f3ae74c2cb1979c531b02e2934089 100644 (file)
@@ -1,10 +1,8 @@
 /*
- *  HID driver for Elecom BM084 (bluetooth mouse).
- *  Removes a non-existing horizontal wheel from
- *  the HID descriptor.
- *  (This module is based on "hid-ortek".)
- *
+ *  HID driver for ELECOM devices.
  *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
+ *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
+ *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
  */
 
 /*
 static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
-               hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
-               rdesc[47] = 0x00;
+       switch (hdev->product) {
+       case USB_DEVICE_ID_ELECOM_BM084:
+               /* The BM084 Bluetooth mouse includes a non-existing horizontal
+                * wheel in the HID descriptor. */
+               if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
+                       hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
+                       rdesc[47] = 0x00;
+               }
+               break;
+       case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
+       case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
+               /* The DEFT trackball has eight buttons, but its descriptor only
+                * reports five, disabling the three Fn buttons on the top of
+                * the mouse.
+                *
+                * Apply the following diff to the descriptor:
+                *
+                * Collection (Physical),              Collection (Physical),
+                *     Report ID (1),                      Report ID (1),
+                *     Report Count (5),           ->      Report Count (8),
+                *     Report Size (1),                    Report Size (1),
+                *     Usage Page (Button),                Usage Page (Button),
+                *     Usage Minimum (01h),                Usage Minimum (01h),
+                *     Usage Maximum (05h),        ->      Usage Maximum (08h),
+                *     Logical Minimum (0),                Logical Minimum (0),
+                *     Logical Maximum (1),                Logical Maximum (1),
+                *     Input (Variable),                   Input (Variable),
+                *     Report Count (1),           ->      Report Count (0),
+                *     Report Size (3),                    Report Size (3),
+                *     Input (Constant),                   Input (Constant),
+                *     Report Size (16),                   Report Size (16),
+                *     Report Count (2),                   Report Count (2),
+                *     Usage Page (Desktop),               Usage Page (Desktop),
+                *     Usage (X),                          Usage (X),
+                *     Usage (Y),                          Usage (Y),
+                *     Logical Minimum (-32768),           Logical Minimum (-32768),
+                *     Logical Maximum (32767),            Logical Maximum (32767),
+                *     Input (Variable, Relative),         Input (Variable, Relative),
+                * End Collection,                     End Collection,
+                */
+               if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
+                       hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+                       rdesc[13] = 8; /* Button/Variable Report Count */
+                       rdesc[21] = 8; /* Button/Variable Usage Maximum */
+                       rdesc[29] = 0; /* Button/Constant Report Count */
+               }
+               break;
        }
        return rdesc;
 }
 
 static const struct hid_device_id elecom_devices[] = {
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)},
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
index 643390ba749d96c5ddb5fbde68fda45c38569c3d..8ca1e8ce0af24e325957526c125ccf55d9081eb8 100644 (file)
 #define USB_VENDOR_ID_ASUSTEK          0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM      0x1726
 #define USB_DEVICE_ID_ASUSTEK_LCM2     0x175b
+#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD    0x17e0
 #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD     0x8585
 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD     0x0101
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRED        0x00fe
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS     0x00ff
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN  0x0004
index 20b40ad2632503754685b84cc07d8787a4a44515..1d6c997b300149269367d00fb5db66b7c2ea25b7 100644 (file)
@@ -349,6 +349,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
 
        if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
                magicmouse_emit_buttons(msc, clicks & 3);
+               input_mt_report_pointer_emulation(input, true);
                input_report_rel(input, REL_X, x);
                input_report_rel(input, REL_Y, y);
        } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -388,16 +389,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
                __clear_bit(BTN_RIGHT, input->keybit);
                __clear_bit(BTN_MIDDLE, input->keybit);
                __set_bit(BTN_MOUSE, input->keybit);
-               __set_bit(BTN_TOOL_FINGER, input->keybit);
-               __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
-               __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
-               __set_bit(BTN_TOOL_QUADTAP, input->keybit);
-               __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
-               __set_bit(BTN_TOUCH, input->keybit);
-               __set_bit(INPUT_PROP_POINTER, input->propbit);
                __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
        }
 
+       __set_bit(BTN_TOOL_FINGER, input->keybit);
+       __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+       __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+       __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+       __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+       __set_bit(BTN_TOUCH, input->keybit);
+       __set_bit(INPUT_PROP_POINTER, input->propbit);
 
        __set_bit(EV_ABS, input->evbit);
 
index 8daa8ce64ebba51e91e57ec801fa7e702fb2a072..fb55fb4c39fcfecaca55c0b8720d28d2f9717678 100644 (file)
@@ -897,6 +897,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        return 0;
 }
 
+static void i2c_hid_acpi_fix_up_power(struct device *dev)
+{
+       acpi_handle handle = ACPI_HANDLE(dev);
+       struct acpi_device *adev;
+
+       if (handle && acpi_bus_get_device(handle, &adev) == 0)
+               acpi_device_fix_up_power(adev);
+}
+
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
        {"ACPI0C50", 0 },
        {"PNP0C50", 0 },
@@ -909,6 +918,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
 {
        return -ENODEV;
 }
+
+static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
 #endif
 
 #ifdef CONFIG_OF
@@ -1030,6 +1041,8 @@ static int i2c_hid_probe(struct i2c_client *client,
        if (ret < 0)
                goto err_regulator;
 
+       i2c_hid_acpi_fix_up_power(&client->dev);
+
        pm_runtime_get_noresume(&client->dev);
        pm_runtime_set_active(&client->dev);
        pm_runtime_enable(&client->dev);
index 4b225fb19a16842f635026d1b1023d5d1cf5068e..e274c9dc32f3a211d97d02f2b6477f20c9121fac 100644 (file)
@@ -1571,37 +1571,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
 {
        unsigned char *data = wacom->data;
 
-       if (wacom->pen_input)
+       if (wacom->pen_input) {
                dev_dbg(wacom->pen_input->dev.parent,
                        "%s: received report #%d\n", __func__, data[0]);
-       else if (wacom->touch_input)
+
+               if (len == WACOM_PKGLEN_PENABLED ||
+                   data[0] == WACOM_REPORT_PENABLED)
+                       return wacom_tpc_pen(wacom);
+       }
+       else if (wacom->touch_input) {
                dev_dbg(wacom->touch_input->dev.parent,
                        "%s: received report #%d\n", __func__, data[0]);
 
-       switch (len) {
-       case WACOM_PKGLEN_TPC1FG:
-               return wacom_tpc_single_touch(wacom, len);
+               switch (len) {
+               case WACOM_PKGLEN_TPC1FG:
+                       return wacom_tpc_single_touch(wacom, len);
 
-       case WACOM_PKGLEN_TPC2FG:
-               return wacom_tpc_mt_touch(wacom);
+               case WACOM_PKGLEN_TPC2FG:
+                       return wacom_tpc_mt_touch(wacom);
 
-       case WACOM_PKGLEN_PENABLED:
-               return wacom_tpc_pen(wacom);
+               default:
+                       switch (data[0]) {
+                       case WACOM_REPORT_TPC1FG:
+                       case WACOM_REPORT_TPCHID:
+                       case WACOM_REPORT_TPCST:
+                       case WACOM_REPORT_TPC1FGE:
+                               return wacom_tpc_single_touch(wacom, len);
 
-       default:
-               switch (data[0]) {
-               case WACOM_REPORT_TPC1FG:
-               case WACOM_REPORT_TPCHID:
-               case WACOM_REPORT_TPCST:
-               case WACOM_REPORT_TPC1FGE:
-                       return wacom_tpc_single_touch(wacom, len);
-
-               case WACOM_REPORT_TPCMT:
-               case WACOM_REPORT_TPCMT2:
-                       return wacom_mt_touch(wacom);
+                       case WACOM_REPORT_TPCMT:
+                       case WACOM_REPORT_TPCMT2:
+                               return wacom_mt_touch(wacom);
 
-               case WACOM_REPORT_PENABLED:
-                       return wacom_tpc_pen(wacom);
+                       }
                }
        }
 
index 22d5eafd681541374817314a1a2db164bb235ce7..5ef2814345ef7f37aa47fd47b8765ce6cc7fad1b 100644 (file)
@@ -343,6 +343,7 @@ config SENSORS_ASB100
 
 config SENSORS_ASPEED
        tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
+       select REGMAP
        help
          This driver provides support for ASPEED AST2400/AST2500 PWM
          and Fan Tacho controllers.
index 48403a2115beb68df404766350cfa844974a55e3..9de13d626c6896d379da385ab48da29258e5f3ee 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/errno.h>
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/hwmon.h>
@@ -494,7 +495,7 @@ static u32 aspeed_get_fan_tach_ch_measure_period(struct aspeed_pwm_tacho_data
        return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit);
 }
 
-static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
                                      u8 fan_tach_ch)
 {
        u32 raw_data, tach_div, clk_source, sec, val;
@@ -510,6 +511,9 @@ static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
        msleep(sec);
 
        regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val);
+       if (!(val & RESULT_STATUS_MASK))
+               return -ETIMEDOUT;
+
        raw_data = val & RESULT_VALUE_MASK;
        tach_div = priv->type_fan_tach_clock_division[type];
        tach_div = 0x4 << (tach_div * 2);
@@ -561,12 +565,14 @@ static ssize_t show_rpm(struct device *dev, struct device_attribute *attr,
 {
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
        int index = sensor_attr->index;
-       u32 rpm;
+       int rpm;
        struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
 
        rpm = aspeed_get_fan_tach_ch_rpm(priv, index);
+       if (rpm < 0)
+               return rpm;
 
-       return sprintf(buf, "%u\n", rpm);
+       return sprintf(buf, "%d\n", rpm);
 }
 
 static umode_t pwm_is_visible(struct kobject *kobj,
@@ -591,24 +597,23 @@ static umode_t fan_dev_is_visible(struct kobject *kobj,
        return a->mode;
 }
 
-static SENSOR_DEVICE_ATTR(pwm0, 0644,
-                       show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm1, 0644,
-                       show_pwm, set_pwm, 1);
+                       show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, 0644,
-                       show_pwm, set_pwm, 2);
+                       show_pwm, set_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, 0644,
-                       show_pwm, set_pwm, 3);
+                       show_pwm, set_pwm, 2);
 static SENSOR_DEVICE_ATTR(pwm4, 0644,
-                       show_pwm, set_pwm, 4);
+                       show_pwm, set_pwm, 3);
 static SENSOR_DEVICE_ATTR(pwm5, 0644,
-                       show_pwm, set_pwm, 5);
+                       show_pwm, set_pwm, 4);
 static SENSOR_DEVICE_ATTR(pwm6, 0644,
-                       show_pwm, set_pwm, 6);
+                       show_pwm, set_pwm, 5);
 static SENSOR_DEVICE_ATTR(pwm7, 0644,
+                       show_pwm, set_pwm, 6);
+static SENSOR_DEVICE_ATTR(pwm8, 0644,
                        show_pwm, set_pwm, 7);
 static struct attribute *pwm_dev_attrs[] = {
-       &sensor_dev_attr_pwm0.dev_attr.attr,
        &sensor_dev_attr_pwm1.dev_attr.attr,
        &sensor_dev_attr_pwm2.dev_attr.attr,
        &sensor_dev_attr_pwm3.dev_attr.attr,
@@ -616,6 +621,7 @@ static struct attribute *pwm_dev_attrs[] = {
        &sensor_dev_attr_pwm5.dev_attr.attr,
        &sensor_dev_attr_pwm6.dev_attr.attr,
        &sensor_dev_attr_pwm7.dev_attr.attr,
+       &sensor_dev_attr_pwm8.dev_attr.attr,
        NULL,
 };
 
@@ -624,40 +630,39 @@ static const struct attribute_group pwm_dev_group = {
        .is_visible = pwm_is_visible,
 };
 
-static SENSOR_DEVICE_ATTR(fan0_input, 0444,
-               show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan1_input, 0444,
-               show_rpm, NULL, 1);
+               show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan2_input, 0444,
-               show_rpm, NULL, 2);
+               show_rpm, NULL, 1);
 static SENSOR_DEVICE_ATTR(fan3_input, 0444,
-               show_rpm, NULL, 3);
+               show_rpm, NULL, 2);
 static SENSOR_DEVICE_ATTR(fan4_input, 0444,
-               show_rpm, NULL, 4);
+               show_rpm, NULL, 3);
 static SENSOR_DEVICE_ATTR(fan5_input, 0444,
-               show_rpm, NULL, 5);
+               show_rpm, NULL, 4);
 static SENSOR_DEVICE_ATTR(fan6_input, 0444,
-               show_rpm, NULL, 6);
+               show_rpm, NULL, 5);
 static SENSOR_DEVICE_ATTR(fan7_input, 0444,
-               show_rpm, NULL, 7);
+               show_rpm, NULL, 6);
 static SENSOR_DEVICE_ATTR(fan8_input, 0444,
-               show_rpm, NULL, 8);
+               show_rpm, NULL, 7);
 static SENSOR_DEVICE_ATTR(fan9_input, 0444,
-               show_rpm, NULL, 9);
+               show_rpm, NULL, 8);
 static SENSOR_DEVICE_ATTR(fan10_input, 0444,
-               show_rpm, NULL, 10);
+               show_rpm, NULL, 9);
 static SENSOR_DEVICE_ATTR(fan11_input, 0444,
-               show_rpm, NULL, 11);
+               show_rpm, NULL, 10);
 static SENSOR_DEVICE_ATTR(fan12_input, 0444,
-               show_rpm, NULL, 12);
+               show_rpm, NULL, 11);
 static SENSOR_DEVICE_ATTR(fan13_input, 0444,
-               show_rpm, NULL, 13);
+               show_rpm, NULL, 12);
 static SENSOR_DEVICE_ATTR(fan14_input, 0444,
-               show_rpm, NULL, 14);
+               show_rpm, NULL, 13);
 static SENSOR_DEVICE_ATTR(fan15_input, 0444,
+               show_rpm, NULL, 14);
+static SENSOR_DEVICE_ATTR(fan16_input, 0444,
                show_rpm, NULL, 15);
 static struct attribute *fan_dev_attrs[] = {
-       &sensor_dev_attr_fan0_input.dev_attr.attr,
        &sensor_dev_attr_fan1_input.dev_attr.attr,
        &sensor_dev_attr_fan2_input.dev_attr.attr,
        &sensor_dev_attr_fan3_input.dev_attr.attr,
@@ -673,6 +678,7 @@ static struct attribute *fan_dev_attrs[] = {
        &sensor_dev_attr_fan13_input.dev_attr.attr,
        &sensor_dev_attr_fan14_input.dev_attr.attr,
        &sensor_dev_attr_fan15_input.dev_attr.attr,
+       &sensor_dev_attr_fan16_input.dev_attr.attr,
        NULL
 };
 
@@ -802,7 +808,6 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
                if (ret)
                        return ret;
        }
-       of_node_put(np);
 
        priv->groups[0] = &pwm_dev_group;
        priv->groups[1] = &fan_dev_group;
index 1844770f3ae838bd63f3837bc0f475b0c8b5f1b1..2b4d613a347491295a51932699264c6edcd4bb51 100644 (file)
@@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
-       sa_path_set_service_id(primary_path, req_msg->service_id);
+       primary_path->service_id = req_msg->service_id;
 
        if (req_msg->alt_local_lid) {
                alt_path->dgid = req_msg->alt_local_gid;
@@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
-               sa_path_set_service_id(alt_path, req_msg->service_id);
+               alt_path->service_id = req_msg->service_id;
        }
 }
 
index 91b7a2fe5a55488ce1e5bb886ba19023bd624328..31bb82d8ecd7f19bbee90bd95a83cec7fe5abca7 100644 (file)
@@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
                        ib->sib_pkey = path->pkey;
                        ib->sib_flowinfo = path->flow_label;
                        memcpy(&ib->sib_addr, &path->sgid, 16);
-                       ib->sib_sid = sa_path_get_service_id(path);
+                       ib->sib_sid = path->service_id;
                        ib->sib_scope_id = 0;
                } else {
                        ib->sib_pkey = listen_ib->sib_pkey;
@@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
                memcpy(&req->local_gid, &req_param->primary_path->sgid,
                       sizeof(req->local_gid));
                req->has_gid    = true;
-               req->service_id =
-                       sa_path_get_service_id(req_param->primary_path);
+               req->service_id = req_param->primary_path->service_id;
                req->pkey       = be16_to_cpu(req_param->primary_path->pkey);
                if (req->pkey != req_param->bth_pkey)
                        pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
@@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
        struct rdma_route *rt;
        const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
        struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
-       const __be64 service_id = sa_path_get_service_id(path);
+       const __be64 service_id =
+               ib_event->param.req_rcvd.primary_path->service_id;
        int ret;
 
        id = rdma_create_id(listen_id->route.addr.dev_addr.net,
@@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
        path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
        path_rec.numb_path = 1;
        path_rec.reversible = 1;
-       sa_path_set_service_id(&path_rec,
-                              rdma_get_service_id(&id_priv->id,
-                                                  cma_dst_addr(id_priv)));
+       path_rec.service_id = rdma_get_service_id(&id_priv->id,
+                                                 cma_dst_addr(id_priv));
 
        comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
                    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
index cb7d372e4bdf877206d8da9141d46a590395bcd9..d92ab4eaa8f311a44bc3854107d9b7f352c0bca7 100644 (file)
@@ -169,6 +169,16 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+int ibnl_init(void);
+void ibnl_cleanup(void);
+
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 on success or a negative for no listeners.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
                              struct netlink_callback *cb);
 int ib_nl_handle_set_timeout(struct sk_buff *skb,
index b784055423c8346b80e00f98d58f6499e7adc8ef..94931c474d41db72b606f654cb0d334087600ee5 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
+#include "core_priv.h"
 
 struct ibnl_client {
        struct list_head                list;
@@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group)
                return -1;
        return 0;
 }
-EXPORT_SYMBOL(ibnl_chk_listeners);
 
 int ibnl_add_client(int index, int nops,
                    const struct ibnl_client_cbs cb_table[])
index e335b09c022ef69c58db15e8c37a43bfce9e8a42..fb7aec4047c8be90d0b70c39f66fdf6606c6fd7e 100644 (file)
@@ -194,7 +194,7 @@ static u32 tid;
        .field_name          = "sa_path_rec:" #field
 
 static const struct ib_field path_rec_table[] = {
-       { PATH_REC_FIELD(ib.service_id),
+       { PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
@@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = {
        .field_name          = "sa_path_rec:" #field
 
 static const struct ib_field opa_path_rec_table[] = {
-       { OPA_PATH_REC_FIELD(opa.service_id),
+       { OPA_PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
@@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 
        /* Now build the attributes */
        if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
-               val64 = be64_to_cpu(sa_path_get_service_id(sa_rec));
+               val64 = be64_to_cpu(sa_rec->service_id);
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
                        sizeof(val64), &val64);
        }
index 3dbf811d3c517232e4ebbff5f6970d34d8f518ca..21e60b1e2ff41b1c27e98ebad68e5f4b0ccb7f42 100644 (file)
@@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
        for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
 
                page = sg_page(sg);
-               if (umem->writable && dirty)
+               if (!PageDirty(page) && umem->writable && dirty)
                        set_page_dirty_lock(page);
                put_page(page);
        }
index 0780b1afefa9d996c870e6982bfc9c5f9b13fb2a..8c4ec564e49583f6d05eab8df5e57e4378582f08 100644 (file)
@@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
                struct vm_area_struct *vma;
                struct hstate *h;
 
+               down_read(&mm->mmap_sem);
                vma = find_vma(mm, ib_umem_start(umem));
-               if (!vma || !is_vm_hugetlb_page(vma))
+               if (!vma || !is_vm_hugetlb_page(vma)) {
+                       up_read(&mm->mmap_sem);
                        return -EINVAL;
+               }
                h = hstate_vma(vma);
                umem->page_shift = huge_page_shift(h);
+               up_read(&mm->mmap_sem);
                umem->hugetlb = 1;
        } else {
                umem->hugetlb = 0;
index 8b9587fe23033fd69708dd60c7e76b18eb2e23ff..94fd989c90600b2640d2fb36f2397c9814160583 100644 (file)
@@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 }
 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
 
-void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
-                               struct sa_path_rec *src)
+static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
+                                      struct sa_path_rec *src)
 {
-       memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid);
-       memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid);
+       memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
+       memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
 
        dst->dlid               = htons(ntohl(sa_path_get_dlid(src)));
        dst->slid               = htons(ntohl(sa_path_get_slid(src)));
index b6fe45924c6ed5a3a835c89f00b9265a940e57ef..0910faf3587b547e873bc4e5572e7defd93623b3 100644 (file)
@@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        release_ep_resources(ep);
+       kfree_skb(skb);
        return 0;
 }
 
@@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        c4iw_put_ep(&ep->parent_ep->com);
        release_ep_resources(ep);
+       kfree_skb(skb);
        return 0;
 }
 
@@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 
        pr_debug("%s rdev %p\n", __func__, rdev);
        req->cmd = CPL_ABORT_NO_RST;
+       skb_get(skb);
        ret = c4iw_ofld_send(rdev, skb);
        if (ret) {
                __state_set(&ep->com, DEAD);
                queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
-       }
+       } else
+               kfree_skb(skb);
 }
 
 static int send_flowc(struct c4iw_ep *ep)
@@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
 
-       hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+       hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+              sizeof(struct tcphdr) +
               ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
        if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
                child_ep->mtu = peer_mss + hdrs;
index 329fb65e8fb0edfafd19d6fbea17196f810f29d5..f96a96dbcf1ff4e40b75de36122a3efd0405faae 100644 (file)
@@ -971,7 +971,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
                 devp->rdev.lldi.sge_egrstatuspagesize);
 
        devp->rdev.hw_queue.t4_eq_status_entries =
-               devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+               devp->rdev.lldi.sge_egrstatuspagesize / 64;
        devp->rdev.hw_queue.t4_max_eq_size = 65520;
        devp->rdev.hw_queue.t4_max_iq_size = 65520;
        devp->rdev.hw_queue.t4_max_rq_size = 8192 -
index 5d6b1eeaa9a0a14c1088655cd5f049b5d3defa91..2ba00b89df6a046bba536cfe889c373d9063ced0 100644 (file)
@@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd)
        }
 }
 
-static void write_global_credit(struct hfi1_devdata *dd,
-                               u8 vau, u16 total, u16 shared)
+/*
+ * Set up allocation unit vaulue.
+ */
+void set_up_vau(struct hfi1_devdata *dd, u8 vau)
 {
-       write_csr(dd, SEND_CM_GLOBAL_CREDIT,
-                 ((u64)total <<
-                  SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
-                 ((u64)shared <<
-                  SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
-                 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
+       u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+       /* do not modify other values in the register */
+       reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
+       reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
+       write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 }
 
 /*
  * Set up initial VL15 credits of the remote.  Assumes the rest of
- * the CM credit registers are zero from a previous global or credit reset .
+ * the CM credit registers are zero from a previous global or credit reset.
+ * Shared limit for VL15 will always be 0.
  */
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
 {
-       /* leave shared count at zero for both global and VL15 */
-       write_global_credit(dd, vau, vl15buf, 0);
+       u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+       /* set initial values for total and shared credit limit */
+       reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
+                SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
+
+       /*
+        * Set total limit to be equal to VL15 credits.
+        * Leave shared limit at 0.
+        */
+       reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
+       write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 
        write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
                  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
@@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd)
        for (i = 0; i < TXE_NUM_DATA_VL; i++)
                write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
        write_csr(dd, SEND_CM_CREDIT_VL15, 0);
-       write_global_credit(dd, 0, 0, 0);
+       write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
        /* reset the CM block */
        pio_send_control(dd, PSC_CM_RESET);
+       /* reset cached value */
+       dd->vl15buf_cached = 0;
 }
 
 /* convert a vCU to a CU */
@@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work)
 {
        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
                                                  link_up_work);
+       struct hfi1_devdata *dd = ppd->dd;
+
        set_link_state(ppd, HLS_UP_INIT);
 
        /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
-       read_ltp_rtt(ppd->dd);
+       read_ltp_rtt(dd);
        /*
         * OPA specifies that certain counters are cleared on a transition
         * to link up, so do that.
         */
-       clear_linkup_counters(ppd->dd);
+       clear_linkup_counters(dd);
        /*
         * And (re)set link up default values.
         */
        set_linkup_defaults(ppd);
 
+       /*
+        * Set VL15 credits. Use cached value from verify cap interrupt.
+        * In case of quick linkup or simulator, vl15 value will be set by
+        * handle_linkup_change. VerifyCap interrupt handler will not be
+        * called in those scenarios.
+        */
+       if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
+               set_up_vl15(dd, dd->vl15buf_cached);
+
        /* enforce link speed enabled */
        if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
                /* oops - current speed is not enabled, bounce */
-               dd_dev_err(ppd->dd,
+               dd_dev_err(dd,
                           "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
                           ppd->link_speed_active, ppd->link_speed_enabled);
                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
@@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work)
         */
        if (vau == 0)
                vau = 1;
-       set_up_vl15(dd, vau, vl15buf);
+       set_up_vau(dd, vau);
+
+       /*
+        * Set VL15 credits to 0 in global credit register. Cache remote VL15
+        * credits value and wait for link-up interrupt ot set it.
+        */
+       set_up_vl15(dd, 0);
+       dd->vl15buf_cached = vl15buf;
 
        /* set up the LCB CRC mode */
        crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
index 5bfa839d1c48bc14939f680e74b696ac43105119..793514f1d15fb4a82357ba755dd97c7855f076ad 100644 (file)
 #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
 #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
 #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
+#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull
 #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
+#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull
 #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
index da322e6668cc5893262c3316486147cac6df1ca9..414a04a481c2abab3b3fe44d3949d591e684c065 100644 (file)
@@ -1045,6 +1045,14 @@ struct hfi1_devdata {
        /* initial vl15 credits to use */
        u16 vl15_init;
 
+       /*
+        * Cached value for vl15buf, read during verify cap interrupt. VL15
+        * credits are to be kept at 0 and set when handling the link-up
+        * interrupt. This removes the possibility of receiving VL15 MAD
+        * packets before this HFI is ready.
+        */
+       u16 vl15buf_cached;
+
        /* Misc small ints */
        u8 n_krcv_queues;
        u8 qos_shift;
@@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
 
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
+void set_up_vau(struct hfi1_devdata *dd, u8 vau);
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
 void reset_link_credits(struct hfi1_devdata *dd);
 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
 
index ba265d0ae93b4a96d6c29364f30b34c6046c335b..04a5082d5ac55259d992db8843cd361797b2992c 100644 (file)
@@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
                 * the remote values.  Both sides must be using the values.
                 */
                if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
-                       set_up_vl15(dd, dd->vau, dd->vl15_init);
+                       set_up_vau(dd, dd->vau);
+                       set_up_vl15(dd, dd->vl15_init);
                        assign_remote_cm_au_table(dd, dd->vcu);
                }
 
index 93faf86d54b620fb2cc621844932d811c17868dd..6a9f6f9819e1a326b0ed5f15f831d10cec2620a6 100644 (file)
@@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
        /*
         * Save BARs and command to rewrite after device reset.
         */
-       dd->pcibar0 = addr;
-       dd->pcibar1 = addr >> 32;
+       pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0);
+       pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1);
        pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
        pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
        pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
index 069bdaf061ab923cbc8b123ab182806fdb3c4dac..1080778a1f7c4a38816ce02058f63baae862d89e 100644 (file)
@@ -2159,8 +2159,11 @@ send_last:
                ret = hfi1_rvt_get_rwqe(qp, 1);
                if (ret < 0)
                        goto nack_op_err;
-               if (!ret)
+               if (!ret) {
+                       /* peer will send again */
+                       rvt_put_ss(&qp->r_sge);
                        goto rnr_nak;
+               }
                wc.ex.imm_data = ohdr->u.rc.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                goto send_last;
index 50d140d25e38fa3cce8b4512b86099c633f1bc6c..2f3bbcac1e3492ff6cfeb5d92ebef73bda8b59fd 100644 (file)
@@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = {
 };
 
 static struct attribute *port_cc_default_attributes[] = {
-       &cc_prescan_attr.attr
+       &cc_prescan_attr.attr,
+       NULL
 };
 
 static struct kobj_type port_cc_ktype = {
index f3bc01bce483fe5d81ba08e2a60212e4f0f32b98..6ae98aa7f74ebb14f4ce2e9e3cee7c629270253f 100644 (file)
@@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
        }
 
        ctrl_ird |= IETF_PEER_TO_PEER;
-       ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
        switch (mpa_key) {
        case MPA_KEY_REQUEST:
@@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
                } else {
                        type = I40IW_CM_EVENT_CONNECTED;
                        cm_node->state = I40IW_CM_STATE_OFFLOADED;
-                       i40iw_send_ack(cm_node);
                }
+               i40iw_send_ack(cm_node);
                break;
        default:
                pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
index f82483b3d1e7dc205fa2f6a5e8037fd4d6866eab..a027e2072477aef12a230fdc79a1a1d6668bf15c 100644 (file)
@@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
        struct i40iw_sc_dev *dev = vsi->dev;
        struct i40iw_sc_qp *qp = NULL;
        bool qs_handle_change = false;
-       bool mss_change = false;
        unsigned long flags;
        u16 qs_handle;
        int i;
 
-       if (vsi->mss != l2params->mss) {
-               mss_change = true;
-               vsi->mss = l2params->mss;
-       }
+       vsi->mss = l2params->mss;
 
        i40iw_fill_qos_list(l2params->qs_handle_list);
        for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
                qs_handle = l2params->qs_handle_list[i];
                if (vsi->qos[i].qs_handle != qs_handle)
                        qs_handle_change = true;
-               else if (!mss_change)
-                       continue;       /* no MSS nor qs handle change */
                spin_lock_irqsave(&vsi->qos[i].lock, flags);
                qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
                while (qp) {
-                       if (mss_change)
-                               i40iw_qp_mss_modify(dev, qp);
                        if (qs_handle_change) {
                                qp->qs_handle = qs_handle;
                                /* issue cqp suspend command */
@@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
 
        set_64bit_val(wqe,
                      8,
-                     LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
                      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
 
        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
@@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
                 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
                 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
                 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
-                LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
                 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
                 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
                 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
index 2728af3103ce9ae285ab8c0b88980e716a1cb5e4..a3f18a22f5ed1787794031eb8a1765ab2804cdc4 100644 (file)
@@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
        status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
                                       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
        if (status)
-               goto exit;
+               goto error;
        info.fpm_query_buf_pa = mem.pa;
        info.fpm_query_buf = mem.va;
        status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
                                       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
        if (status)
-               goto exit;
+               goto error;
        info.fpm_commit_buf_pa = mem.pa;
        info.fpm_commit_buf = mem.va;
        info.hmc_fn_id = ldev->fid;
@@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
        info.exception_lan_queue = 1;
        info.vchnl_send = i40iw_virtchnl_send;
        status = i40iw_device_init(&iwdev->sc_dev, &info);
-exit:
-       if (status) {
-               kfree(iwdev->hmc_info_mem);
-               iwdev->hmc_info_mem = NULL;
-       }
+
+       if (status)
+               goto error;
        memset(&vsi_info, 0, sizeof(vsi_info));
        vsi_info.dev = &iwdev->sc_dev;
        vsi_info.back_vsi = (void *)iwdev;
@@ -1362,11 +1360,19 @@ exit:
                memset(&stats_info, 0, sizeof(stats_info));
                stats_info.fcn_id = ldev->fid;
                stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+               if (!stats_info.pestat) {
+                       status = I40IW_ERR_NO_MEMORY;
+                       goto error;
+               }
                stats_info.stats_initialize = true;
                if (stats_info.pestat)
                        i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
        }
        return status;
+error:
+       kfree(iwdev->hmc_info_mem);
+       iwdev->hmc_info_mem = NULL;
+       return status;
 }
 
 /**
index aa66c1c63dfa4b0879eecfdb4a46f58f3279de98..f27be3e7830bb438f5543d88ef35eae1c4d93586 100644 (file)
@@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
                            struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
 void *i40iw_remove_head(struct list_head *list);
 void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
 
 void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
 void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
index 7b76259752b0062e5cf16f7bc097f5cd4b66098e..959ec81fba99ca6499f2e8d7d68f0cb073c86646 100644 (file)
@@ -541,7 +541,6 @@ struct i40iw_create_qp_info {
 struct i40iw_modify_qp_info {
        u64 rx_win0;
        u64 rx_win1;
-       u16 new_mss;
        u8 next_iwarp_state;
        u8 termlen;
        bool ord_valid;
@@ -554,7 +553,6 @@ struct i40iw_modify_qp_info {
        bool dont_send_term;
        bool dont_send_fin;
        bool cached_var_valid;
-       bool mss_change;
        bool force_loopback;
 };
 
index 409a3781e735db6f2072bde350815beed380425e..56d986924a4c1708216684f776f4705451a65f79 100644 (file)
@@ -756,23 +756,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b
                i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
 }
 
-/**
- * i40iw_qp_mss_modify - modify mss for qp
- * @dev: hardware control device structure
- * @qp: hardware control qp
- */
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
-{
-       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-       struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
-       struct i40iw_modify_qp_info info;
-
-       memset(&info, 0, sizeof(info));
-       info.mss_change = true;
-       info.new_mss = qp->vsi->mss;
-       i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
-}
-
 /**
  * i40iw_term_modify_qp - modify qp for term message
  * @qp: hardware control qp
index f4d13683a403a6369c61d40d0a19cc4e5124334d..48fd327f876b08b5b246b41425ff997e64cf861d 100644 (file)
@@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
        if (!dev->vchnl_up)
                return I40IW_ERR_NOT_READY;
        if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
-               if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0)
-                       vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
-               else
-                       vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+               vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
                return I40IW_SUCCESS;
        }
        for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
index b4694717f6f301f4a2d6b407f18b2c42614ee24e..21d31cb1325f5fc0271f093e5ec069641b8b9ffe 100644 (file)
@@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
        if (port < 0)
                return;
        ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+       ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
 
        mlx4_ib_query_ah(&ah.ibah, &ah_attr);
        if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
index d45772da09635c2164f4cef8bcf5255c17fe8cff..0c79983c8b1a0a4e6189fb5e02439ff3522a9098 100644 (file)
@@ -2979,6 +2979,18 @@ error_0:
        return ret;
 }
 
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+       switch (umr_fence_cap) {
+       case MLX5_CAP_UMR_FENCE_NONE:
+               return MLX5_FENCE_MODE_NONE;
+       case MLX5_CAP_UMR_FENCE_SMALL:
+               return MLX5_FENCE_MODE_INITIATOR_SMALL;
+       default:
+               return MLX5_FENCE_MODE_STRONG_ORDERING;
+       }
+}
+
 static int create_dev_resources(struct mlx5_ib_resources *devr)
 {
        struct ib_srq_init_attr attr;
@@ -3693,6 +3705,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
        mlx5_ib_internal_fill_odp_caps(dev);
 
+       dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                dev->ib_dev.alloc_mw            = mlx5_ib_alloc_mw;
                dev->ib_dev.dealloc_mw          = mlx5_ib_dealloc_mw;
index 38c877bc45e592dbbe9670b3c9b624b90b6df30d..bdcf25410c99df7f57e280f2672eb86b8e3205fd 100644 (file)
@@ -349,7 +349,7 @@ struct mlx5_ib_qp {
        struct mlx5_ib_wq       rq;
 
        u8                      sq_signal_bits;
-       u8                      fm_cache;
+       u8                      next_fence;
        struct mlx5_ib_wq       sq;
 
        /* serialize qp state modifications
@@ -654,6 +654,7 @@ struct mlx5_ib_dev {
        struct mlx5_ib_port     *port;
        struct mlx5_sq_bfreg     bfreg;
        struct mlx5_sq_bfreg     fp_bfreg;
+       u8                              umr_fence;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
index 93959e1e43a3da5e2f71a89c8c12bdc61368e3e0..ebb6768684de372d755cff4c6f92c768edb9045b 100644 (file)
@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
        }
 }
 
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
-{
-       if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
-                    wr->send_flags & IB_SEND_FENCE))
-               return MLX5_FENCE_MODE_STRONG_ORDERING;
-
-       if (unlikely(fence)) {
-               if (wr->send_flags & IB_SEND_FENCE)
-                       return MLX5_FENCE_MODE_SMALL_AND_FENCE;
-               else
-                       return fence;
-       } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
-               return MLX5_FENCE_MODE_FENCE;
-       }
-
-       return 0;
-}
-
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                     struct mlx5_wqe_ctrl_seg **ctrl,
                     struct ib_send_wr *wr, unsigned *idx,
@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 static void finish_wqe(struct mlx5_ib_qp *qp,
                       struct mlx5_wqe_ctrl_seg *ctrl,
                       u8 size, unsigned idx, u64 wr_id,
-                      int nreq, u8 fence, u8 next_fence,
-                      u32 mlx5_opcode)
+                      int nreq, u8 fence, u32 mlx5_opcode)
 {
        u8 opmod = 0;
 
@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
                                             mlx5_opcode | ((u32)opmod << 24));
        ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
        ctrl->fm_ce_se |= fence;
-       qp->fm_cache = next_fence;
        if (unlikely(qp->wq_sig))
                ctrl->signature = wq_sig(ctrl);
 
@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        goto out;
                }
 
-               fence = qp->fm_cache;
                num_sge = wr->num_sge;
                if (unlikely(num_sge > qp->sq.max_gs)) {
                        mlx5_ib_warn(dev, "\n");
@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        goto out;
                }
 
+               if (wr->opcode == IB_WR_LOCAL_INV ||
+                   wr->opcode == IB_WR_REG_MR) {
+                       fence = dev->umr_fence;
+                       next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+               } else if (wr->send_flags & IB_SEND_FENCE) {
+                       if (qp->next_fence)
+                               fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+                       else
+                               fence = MLX5_FENCE_MODE_FENCE;
+               } else {
+                       fence = qp->next_fence;
+               }
+
                switch (ibqp->qp_type) {
                case IB_QPT_XRC_INI:
                        xrc = seg;
@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                goto out;
 
                        case IB_WR_LOCAL_INV:
-                               next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
                                ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
                                set_linv_wr(qp, &seg, &size);
@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                break;
 
                        case IB_WR_REG_MR:
-                               next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                qp->sq.wr_data[idx] = IB_WR_REG_MR;
                                ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
                                err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-                                          nreq, get_fence(fence, wr),
-                                          next_fence, MLX5_OPCODE_UMR);
+                               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                                          fence, MLX5_OPCODE_UMR);
                                /*
                                 * SET_PSV WQEs are not signaled and solicited
                                 * on error
@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-                                          nreq, get_fence(fence, wr),
-                                          next_fence, MLX5_OPCODE_SET_PSV);
+                               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                                          fence, MLX5_OPCODE_SET_PSV);
                                err = begin_wqe(qp, &seg, &ctrl, wr,
                                                &idx, &size, nreq);
                                if (err) {
@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
                                                 mr->sig->psv_wire.psv_idx, &seg,
                                                 &size);
@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-                                          nreq, get_fence(fence, wr),
-                                          next_fence, MLX5_OPCODE_SET_PSV);
+                               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                                          fence, MLX5_OPCODE_SET_PSV);
+                               qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                num_sge = 0;
                                goto skip_psv;
 
@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                }
 
-               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
-                          get_fence(fence, wr), next_fence,
+               qp->next_fence = next_fence;
+               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
                           mlx5_ib_opcode[wr->opcode]);
 skip_psv:
                if (0)
index fb983df7c157b660239983e00c85da5627863dec..30b256a2c54ec42dd97b29ff0f0cb15be6d44510 100644 (file)
@@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
                ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
        }
        ctrl_ird |= IETF_PEER_TO_PEER;
-       ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
        switch (mpa_key) {
        case MPA_KEY_REQUEST:
@@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
                        type = NES_CM_EVENT_CONNECTED;
                        cm_node->state = NES_CM_STATE_TSA;
                }
-
+               send_ack(cm_node, NULL);
                break;
        default:
                WARN_ON(1);
index 3d7705cec7705fcf334a96353e6830b554176500..d86dbe814d98fbe00adf22acb6d1ee658efc5ef5 100644 (file)
@@ -270,11 +270,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
                return rc;
        }
 
-       vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
-       if (vlan_id < VLAN_CFI_MASK)
-               has_vlan = true;
-       if (sgid_attr.ndev)
+       if (sgid_attr.ndev) {
+               vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+               if (vlan_id < VLAN_CFI_MASK)
+                       has_vlan = true;
+
                dev_put(sgid_attr.ndev);
+       }
 
        if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
                DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
index fc8b88514da52bc380ce51066e0a2665ff18a2be..4ddbcac5eabe6834f90ecac5885b75495493264a 100644 (file)
@@ -1956,8 +1956,10 @@ send_last:
                ret = qib_get_rwqe(qp, 1);
                if (ret < 0)
                        goto nack_op_err;
-               if (!ret)
+               if (!ret) {
+                       rvt_put_ss(&qp->r_sge);
                        goto rnr_nak;
+               }
                wc.ex.imm_data = ohdr->u.rc.imm_data;
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
index 874b24366e4dd744cc9ddf1c4b561a5a050641d1..7871379342f48fa77b2e6e8279ca774b4c49ad2f 100644 (file)
@@ -178,7 +178,7 @@ static inline int ib_speed_enum_to_int(int speed)
 static int ipoib_get_link_ksettings(struct net_device *netdev,
                                    struct ethtool_link_ksettings *cmd)
 {
-       struct ipoib_dev_priv *priv = netdev_priv(netdev);
+       struct ipoib_dev_priv *priv = ipoib_priv(netdev);
        struct ib_port_attr attr;
        int ret, speed, width;
 
index 2869d1adb1decdab20a19145536fc978344aba78..a115c0b7a310ed630c1c32ffd9e2c17574358f7c 100644 (file)
@@ -1590,7 +1590,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
        wait_for_completion(&priv->ntbl.deleted);
 }
 
-void ipoib_dev_uninit_default(struct net_device *dev)
+static void ipoib_dev_uninit_default(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
 
index def723a5df29fa72342ed5e52ec7fa35fa54375c..2354c742caa12d69ef761ecbc7ca8af311bb8aa5 100644 (file)
@@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
        ch->path.sgid = target->sgid;
        ch->path.dgid = target->orig_dgid;
        ch->path.pkey = target->pkey;
-       sa_path_set_service_id(&ch->path, target->service_id);
+       ch->path.service_id = target->service_id;
 
        return 0;
 }
@@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
        return 0;
 
 err_qp:
-       srp_destroy_qp(ch, qp);
+       ib_destroy_qp(qp);
 
 err_send_cq:
        ib_free_cq(send_cq);
index d07dd5196ffca59c11532051fb88e2ecdc7326c9..8aa158a091806fd7d3107ed56b8550198be7b9a0 100644 (file)
@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
                       id);
                return NULL;
        } else {
-               rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
+               rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
                if (!rs)
                        return NULL;
                rs->state = CCPResetIdle;
index 8b7faea2ddf88b718c252dc049e5d1b8b5e8357b..422dced7c90ac26dcf0d366fedb32ab9edf44207 100644 (file)
@@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
                if (sk->sk_state != MISDN_BOUND)
                        continue;
                if (!cskb)
-                       cskb = skb_copy(skb, GFP_KERNEL);
+                       cskb = skb_copy(skb, GFP_ATOMIC);
                if (!cskb) {
                        printk(KERN_WARNING "%s no skb\n", __func__);
                        break;
index bf7419a56454e3834ea2c2034d3170591f1ad97f..f4eace5ea184095eb0c170c4f3f1647f72b8c537 100644 (file)
@@ -485,10 +485,10 @@ void bitmap_print_sb(struct bitmap *bitmap)
        pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
        pr_debug("       version: %d\n", le32_to_cpu(sb->version));
        pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
-                *(__u32 *)(sb->uuid+0),
-                *(__u32 *)(sb->uuid+4),
-                *(__u32 *)(sb->uuid+8),
-                *(__u32 *)(sb->uuid+12));
+                le32_to_cpu(*(__u32 *)(sb->uuid+0)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+4)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+8)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+12)));
        pr_debug("        events: %llu\n",
                 (unsigned long long) le64_to_cpu(sb->events));
        pr_debug("events cleared: %llu\n",
index cd8139593ccd50655a2329460cc8de9d175eac86..840c1496b2b138ef504bde4c441b1082df183473 100644 (file)
@@ -1334,7 +1334,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = REQ_PREFLUSH,
+               .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
index c7f7c8d7657670850adedceb505538e1b9cdb2ce..7910bfe50da4469c44b571363cc6696f74f5fa42 100644 (file)
@@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
                        for (i = 0; i < commit_sections; i++)
                                rw_section_mac(ic, commit_start + i, true);
                }
-               rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
+               rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
+                          commit_sections, &io_comp);
        } else {
                unsigned to_end;
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
@@ -2374,21 +2375,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
        blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
 }
 
-/* FIXME: use new kvmalloc */
-static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
-{
-       void *ptr = NULL;
-
-       if (size <= PAGE_SIZE)
-               ptr = kmalloc(size, GFP_KERNEL | gfp);
-       if (!ptr && size <= KMALLOC_MAX_SIZE)
-               ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
-       if (!ptr)
-               ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
-
-       return ptr;
-}
-
 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
 {
        unsigned i;
@@ -2407,7 +2393,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
        struct page_list *pl;
        unsigned i;
 
-       pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
+       pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
        if (!pl)
                return NULL;
 
@@ -2437,7 +2423,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
        struct scatterlist **sl;
        unsigned i;
 
-       sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
+       sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
        if (!sl)
                return NULL;
 
@@ -2453,7 +2439,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
 
                n_pages = (end_index - start_index + 1);
 
-               s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
+               s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
                if (!s) {
                        dm_integrity_free_journal_scatterlist(ic, sl);
                        return NULL;
@@ -2617,7 +2603,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                goto bad;
                        }
 
-                       sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
+                       sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
                        if (!sg) {
                                *error = "Unable to allocate sg list";
                                r = -ENOMEM;
@@ -2673,7 +2659,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                r = -ENOMEM;
                                goto bad;
                        }
-                       ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
+                       ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO);
                        if (!ic->sk_requests) {
                                *error = "Unable to allocate sk requests";
                                r = -ENOMEM;
@@ -2740,7 +2726,7 @@ retest_commit_id:
                r = -ENOMEM;
                goto bad;
        }
-       ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
+       ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
        if (!ic->journal_tree) {
                *error = "Could not allocate memory for journal tree";
                r = -ENOMEM;
index 0555b4410e0598a6096642f10978ad6798bc5f98..41852ae287a58c29e675dd4b794f1670f9dc53e8 100644 (file)
@@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
        }
 
        /*
-        * Try to avoid low memory issues when a device is suspended.
+        * Use __GFP_HIGH to avoid low memory issues when a device is
+        * suspended and the ioctl is needed to resume it.
         * Use kmalloc() rather than vmalloc() when we can.
         */
        dmi = NULL;
        noio_flag = memalloc_noio_save();
-       dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL);
+       dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
        memalloc_noio_restore(noio_flag);
 
        if (!dmi) {
index a95cbb80fb34444144bad346b3e769c625e8c788..e61c45047c25a9ba2683c313fbc2151c9051b178 100644 (file)
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = REQ_PREFLUSH,
+               .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = ms->io_client,
index b93476c3ba3f9767fb133fed977e7a888cc0698e..c5534d294773fc0267a1b4c7438a3316e74d417a 100644 (file)
@@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        /*
         * Commit exceptions to disk.
         */
-       if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
+       if (ps->valid && area_io(ps, REQ_OP_WRITE,
+                                REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
                ps->valid = 0;
 
        /*
index 97de961a3bfc80d11497c5ac2558ce7ad7a57a7e..1ec9b2c51c076d99ba6003f90eae608d9c9e35af 100644 (file)
@@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
                return r;
        }
 
-       if (likely(v->version >= 1))
+       if (likely(v->salt_size && (v->version >= 1)))
                r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
        return r;
@@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
 {
        int r;
 
-       if (unlikely(!v->version)) {
+       if (unlikely(v->salt_size && (!v->version))) {
                r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
                if (r < 0) {
index 6ef9500226c0c7d789ed78e6876195f73ef9d6b7..37ccd73c79ecf2eeb4f33b5bc597f88ca5750d4b 100644 (file)
@@ -1657,7 +1657,7 @@ static struct mapped_device *alloc_dev(int minor)
 
        bio_init(&md->flush_bio, NULL, 0);
        md->flush_bio.bi_bdev = md->bdev;
-       md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+       md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
        dm_stats_init(&md->stats);
 
index 7299ce2f08a810555a0407a423a512a0f59f190c..03082e17c65cc87af2a44a8020bda6cbdb8b0262 100644 (file)
@@ -1311,8 +1311,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
        cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
        lock_comm(cinfo, 1);
        ret = __sendmsg(cinfo, &cmsg);
-       if (ret)
+       if (ret) {
+               unlock_comm(cinfo);
                return ret;
+       }
        cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
        ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
        cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
index 10367ffe92e3e37704f5e32793ea97175c8b15e6..87edc342ccb3d5c51bb45313f1218f9839528917 100644 (file)
@@ -765,7 +765,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
            test_bit(FailFast, &rdev->flags) &&
            !test_bit(LastDev, &rdev->flags))
                ff = MD_FAILFAST;
-       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
 
        atomic_inc(&mddev->pending_writes);
        submit_bio(bio);
@@ -5174,6 +5174,18 @@ static void mddev_delayed_delete(struct work_struct *ws)
 
 static void no_op(struct percpu_ref *r) {}
 
+int mddev_init_writes_pending(struct mddev *mddev)
+{
+       if (mddev->writes_pending.percpu_count_ptr)
+               return 0;
+       if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
+               return -ENOMEM;
+       /* We want to start with the refcount at zero */
+       percpu_ref_put(&mddev->writes_pending);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
+
 static int md_alloc(dev_t dev, char *name)
 {
        /*
@@ -5239,10 +5251,6 @@ static int md_alloc(dev_t dev, char *name)
        blk_queue_make_request(mddev->queue, md_make_request);
        blk_set_stacking_limits(&mddev->queue->limits);
 
-       if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
-               goto abort;
-       /* We want to start with the refcount at zero */
-       percpu_ref_put(&mddev->writes_pending);
        disk = alloc_disk(1 << shift);
        if (!disk) {
                blk_cleanup_queue(mddev->queue);
index 11f15146ce5177de0468c706a5f82a037b42c132..0fa1de42c42bcb328276a42fc56809d53217a285 100644 (file)
@@ -648,6 +648,7 @@ extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
+extern int mddev_init_writes_pending(struct mddev *mddev);
 extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
index af5056d568788a53f6c3a2456a353cba3bbfe35a..e1a7e3d4c5e4f17d0dedb4f171ad3bd47ff70022 100644 (file)
@@ -3063,6 +3063,8 @@ static int raid1_run(struct mddev *mddev)
                        mdname(mddev));
                return -EIO;
        }
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
        /*
         * copy the already verified devices into our private RAID1
         * bookkeeping area. [whatever we allocate in run(),
index 4343d7ff9916bee9a9a399572c2bd3313723fa3a..797ed60abd5e27cd2f32d0f80d36c660ed1e4419 100644 (file)
@@ -3611,6 +3611,9 @@ static int raid10_run(struct mddev *mddev)
        int first = 1;
        bool discard_supported = false;
 
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
+
        if (mddev->private == NULL) {
                conf = setup_conf(mddev);
                if (IS_ERR(conf))
index 4c00bc248287e4ab89b492225e0d054973725549..0a7af8b0a80a031a99a7af1742e2d64e6df0d106 100644 (file)
@@ -1782,7 +1782,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
        mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
                                             mb, PAGE_SIZE));
        if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-                         REQ_FUA, false)) {
+                         REQ_SYNC | REQ_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
@@ -2388,7 +2388,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
                                                     mb, PAGE_SIZE));
                sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
-                            REQ_OP_WRITE, REQ_FUA, false);
+                            REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
                sh->log_start = ctx->pos;
                list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
                atomic_inc(&log->stripe_in_journal_count);
index 5d25bebf3328e4967334465916aca3e3c750e447..ccce92e68d7fa5d8258bb7f2ca2bfa1bcd545709 100644 (file)
@@ -907,8 +907,8 @@ static int ppl_write_empty_header(struct ppl_log *log)
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
 
        if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
-                         PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0,
-                         false)) {
+                         PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
+                         REQ_FUA, 0, false)) {
                md_error(rdev->mddev, rdev);
                ret = -EIO;
        }
index 9c4f7659f8b1337c99cfd0ab5070012e3f658849..ec0f951ae19fbc8fa392306b7c6c45a38f3eb0e4 100644 (file)
@@ -4085,10 +4085,15 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
                        set_bit(STRIPE_INSYNC, &sh->state);
                else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                sh->check_state = check_state_compute_run;
                                set_bit(STRIPE_COMPUTE_RUN, &sh->state);
                                set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
@@ -4237,10 +4242,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
                        }
                } else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                int *target = &sh->ops.target;
 
                                sh->ops.target = -1;
@@ -7108,6 +7118,9 @@ static int raid5_run(struct mddev *mddev)
        long long min_offset_diff = 0;
        int first = 1;
 
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
+
        if (mddev->recovery_cp != MaxSector)
                pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
                          mdname(mddev));
index b72edd27f880fbe99641cd36b44005c6ad9252c6..55d9c2b82b7eab11268d88f7d8e5cf586f910985 100644 (file)
@@ -2,6 +2,12 @@
 # Multimedia device configuration
 #
 
+config CEC_CORE
+       tristate
+
+config CEC_NOTIFIER
+       bool
+
 menuconfig MEDIA_SUPPORT
        tristate "Multimedia support"
        depends on HAS_IOMEM
index 523fea3648ad71749009fc9f7d0f543d36734128..044503aa8801744785da9c637bda4c020bfac766 100644 (file)
@@ -4,8 +4,6 @@
 
 media-objs     := media-device.o media-devnode.o media-entity.o
 
-obj-$(CONFIG_CEC_CORE) += cec/
-
 #
 # I2C drivers should come before other drivers, otherwise they'll fail
 # when compiled as builtin drivers
@@ -26,6 +24,8 @@ obj-$(CONFIG_DVB_CORE)  += dvb-core/
 # There are both core and drivers at RC subtree - merge before drivers
 obj-y += rc/
 
+obj-$(CONFIG_CEC_CORE) += cec/
+
 #
 # Finally, merge the drivers that require the core
 #
index f944d93e3167f4e338a1e9cb607af32311136b50..4e25a950ae6f5af73c7bc7bd305a51368ef7d52b 100644 (file)
@@ -1,19 +1,5 @@
-config CEC_CORE
-       tristate
-       depends on MEDIA_CEC_SUPPORT
-       default y
-
-config MEDIA_CEC_NOTIFIER
-       bool
-
 config MEDIA_CEC_RC
        bool "HDMI CEC RC integration"
        depends on CEC_CORE && RC_CORE
        ---help---
          Pass on CEC remote control messages to the RC framework.
-
-config MEDIA_CEC_DEBUG
-       bool "HDMI CEC debugfs interface"
-       depends on CEC_CORE && DEBUG_FS
-       ---help---
-         Turns on the DebugFS interface for CEC devices.
index 402a6c62a3e8b9bb4857ac553e1b9a14c6babc16..eaf408e646697adcb8cc95b95c8425574715bdee 100644 (file)
@@ -1,6 +1,6 @@
 cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
 
-ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y)
+ifeq ($(CONFIG_CEC_NOTIFIER),y)
   cec-objs += cec-notifier.o
 endif
 
index f5fe01c9da8af17906805668b8448cd6aaf9da1e..9dfc79800c7191964557afe729603afa5cf2b5df 100644 (file)
@@ -1864,7 +1864,7 @@ void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
                WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
 }
 
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
 /*
  * Log the current state of the CEC adapter.
  * Very useful for debugging.
index f9ebff90f8ebc08b77088c8f1901d8aa7077b99f..2f87748ba4fceea377284be6a1c35b6478ca4788 100644 (file)
@@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
        put_device(&devnode->dev);
 }
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
 {
        cec_s_phys_addr(adap, pa, false);
@@ -323,7 +323,7 @@ int cec_register_adapter(struct cec_adapter *adap,
        }
 
        dev_set_drvdata(&adap->devnode.dev, adap);
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
        if (!top_cec_dir)
                return 0;
 
@@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap)
        adap->rc = NULL;
 #endif
        debugfs_remove_recursive(adap->cec_dir);
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
        if (adap->notifier)
                cec_notifier_unregister(adap->notifier);
 #endif
@@ -395,7 +395,7 @@ static int __init cec_devnode_init(void)
                return ret;
        }
 
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
        top_cec_dir = debugfs_create_dir("cec", NULL);
        if (IS_ERR_OR_NULL(top_cec_dir)) {
                pr_warn("cec: Failed to create debugfs cec dir\n");
index fd181c99ce117fc44c16ded99d50abe9a655b4f8..aaa9471c7d117eae0a00e4bbbf4ed20e3ca65b36 100644 (file)
@@ -220,7 +220,8 @@ config VIDEO_ADV7604
 
 config VIDEO_ADV7604_CEC
        bool "Enable Analog Devices ADV7604 CEC support"
-       depends on VIDEO_ADV7604 && CEC_CORE
+       depends on VIDEO_ADV7604
+       select CEC_CORE
        ---help---
          When selected the adv7604 will support the optional
          HDMI CEC feature.
@@ -240,7 +241,8 @@ config VIDEO_ADV7842
 
 config VIDEO_ADV7842_CEC
        bool "Enable Analog Devices ADV7842 CEC support"
-       depends on VIDEO_ADV7842 && CEC_CORE
+       depends on VIDEO_ADV7842
+       select CEC_CORE
        ---help---
          When selected the adv7842 will support the optional
          HDMI CEC feature.
@@ -478,7 +480,8 @@ config VIDEO_ADV7511
 
 config VIDEO_ADV7511_CEC
        bool "Enable Analog Devices ADV7511 CEC support"
-       depends on VIDEO_ADV7511 && CEC_CORE
+       depends on VIDEO_ADV7511
+       select CEC_CORE
        ---help---
          When selected the adv7511 will support the optional
          HDMI CEC feature.
index ac026ee1ca07484ffa6b0c51632d1c9958c0d3cd..041cb80a26b1ff22f049abaed44b0a62c9b91a9c 100644 (file)
@@ -501,8 +501,9 @@ if CEC_PLATFORM_DRIVERS
 
 config VIDEO_SAMSUNG_S5P_CEC
        tristate "Samsung S5P CEC driver"
-       depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
-       select MEDIA_CEC_NOTIFIER
+       depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+       select CEC_CORE
+       select CEC_NOTIFIER
        ---help---
          This is a driver for Samsung S5P HDMI CEC interface. It uses the
          generic CEC framework interface.
@@ -511,8 +512,9 @@ config VIDEO_SAMSUNG_S5P_CEC
 
 config VIDEO_STI_HDMI_CEC
        tristate "STMicroelectronics STiH4xx HDMI CEC driver"
-       depends on CEC_CORE && (ARCH_STI || COMPILE_TEST)
-       select MEDIA_CEC_NOTIFIER
+       depends on ARCH_STI || COMPILE_TEST
+       select CEC_CORE
+       select CEC_NOTIFIER
        ---help---
          This is a driver for STIH4xx HDMI CEC interface. It uses the
          generic CEC framework interface.
index 57a842ff309747382a928998d0209072476da421..b7731b18ecae1741ebee3be1bf9188659bfeb398 100644 (file)
@@ -493,10 +493,10 @@ static int vdec_h264_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_h264_if = {
-       vdec_h264_init,
-       vdec_h264_decode,
-       vdec_h264_get_param,
-       vdec_h264_deinit,
+       .init           = vdec_h264_init,
+       .decode         = vdec_h264_decode,
+       .get_param      = vdec_h264_get_param,
+       .deinit         = vdec_h264_deinit,
 };
 
 struct vdec_common_if *get_h264_dec_comm_if(void);
index 6e7a62ae0842c2e69bb65e31bb1fa80e7c9ce44c..b9fad6a488799ebc7fad8b12b6990b9c33d7c60b 100644 (file)
@@ -620,10 +620,10 @@ static void vdec_vp8_deinit(unsigned long h_vdec)
 }
 
 static struct vdec_common_if vdec_vp8_if = {
-       vdec_vp8_init,
-       vdec_vp8_decode,
-       vdec_vp8_get_param,
-       vdec_vp8_deinit,
+       .init           = vdec_vp8_init,
+       .decode         = vdec_vp8_decode,
+       .get_param      = vdec_vp8_get_param,
+       .deinit         = vdec_vp8_deinit,
 };
 
 struct vdec_common_if *get_vp8_dec_comm_if(void);
index 5539b1853f166a611ed678bc1274f55e48f1347c..1daee1207469b3ea9e740676aa80765a4280c118 100644 (file)
@@ -979,10 +979,10 @@ static int vdec_vp9_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_vp9_if = {
-       vdec_vp9_init,
-       vdec_vp9_decode,
-       vdec_vp9_get_param,
-       vdec_vp9_deinit,
+       .init           = vdec_vp9_init,
+       .decode         = vdec_vp9_decode,
+       .get_param      = vdec_vp9_get_param,
+       .deinit         = vdec_vp9_deinit,
 };
 
 struct vdec_common_if *get_vp9_dec_comm_if(void);
index b36ac19dc6e48d60afbc4053fb848d9439dfcd5d..154de92dd809e74ff3d7539787856fef631229fc 100644 (file)
@@ -26,7 +26,8 @@ config VIDEO_VIVID
 
 config VIDEO_VIVID_CEC
        bool "Enable CEC emulation support"
-       depends on VIDEO_VIVID && CEC_CORE
+       depends on VIDEO_VIVID
+       select CEC_CORE
        ---help---
          When selected the vivid module will emulate the optional
          HDMI CEC feature.
index 90f66dc7c0d74dbed7cf370f652546eadc563bf8..a2fc1a1d58b0e317539a1a679e32f96c12d3b00f 100644 (file)
@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
  */
 void ir_raw_event_handle(struct rc_dev *dev)
 {
-       if (!dev->raw)
+       if (!dev->raw || !dev->raw->thread)
                return;
 
        wake_up_process(dev->raw->thread);
@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev)
 {
        int rc;
        struct ir_raw_handler *handler;
+       struct task_struct *thread;
 
        if (!dev)
                return -EINVAL;
@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev)
         * because the event is coming from userspace
         */
        if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
-               dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
-                                              "rc%u", dev->minor);
+               thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
+                                    dev->minor);
 
-               if (IS_ERR(dev->raw->thread)) {
-                       rc = PTR_ERR(dev->raw->thread);
+               if (IS_ERR(thread)) {
+                       rc = PTR_ERR(thread);
                        goto out;
                }
+
+               dev->raw->thread = thread;
        }
 
        mutex_lock(&ir_raw_handler_lock);
index 8937f3986a01f1c89e3fcc5b6825cb6b754ddf20..18ead44824ba2bd02d27480e0f04ac69f39182fd 100644 (file)
@@ -1,6 +1,7 @@
 config USB_PULSE8_CEC
        tristate "Pulse Eight HDMI CEC"
-       depends on USB_ACM && CEC_CORE
+       depends on USB_ACM
+       select CEC_CORE
        select SERIO
        select SERIO_SERPORT
        ---help---
index 3eb86607efb8f627566af1ff374053d7f6b980ec..030ef01b1ff04137ede4a84ddf0357afe943343d 100644 (file)
@@ -1,6 +1,7 @@
 config USB_RAINSHADOW_CEC
        tristate "RainShadow Tech HDMI CEC"
-       depends on USB_ACM && CEC_CORE
+       depends on USB_ACM
+       select CEC_CORE
        select SERIO
        select SERIO_SERPORT
        ---help---
index 541ca543f71f4efe7e29e7c22bce114c2b18fc3d..71bd68548c9c87d3359a458efe9069c59a81e81a 100644 (file)
@@ -119,7 +119,7 @@ static void rain_irq_work_handler(struct work_struct *work)
 
        while (true) {
                unsigned long flags;
-               bool exit_loop;
+               bool exit_loop = false;
                char data;
 
                spin_lock_irqsave(&rain->buf_lock, flags);
index 35910f945bfad02823f7146c0746feb90aa2cda9..99e644cda4d13db301b713a5752788c0f646dfa1 100644 (file)
@@ -581,7 +581,7 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        return of_platform_populate(np, NULL, NULL, dev);
 }
 
-static int atmel_ebi_resume(struct device *dev)
+static __maybe_unused int atmel_ebi_resume(struct device *dev)
 {
        struct atmel_ebi *ebi = dev_get_drvdata(dev);
        struct atmel_ebi_dev *ebid;
index 17b433f1ce23b7deeb2e3f35139d3bef57ef20d0..0761271d68c5613b23152c03522f6a388950ea86 100644 (file)
@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
        /* Do this outside the status_mutex to avoid a circular dependency with
         * the locking in cxl_mmap_fault() */
-       if (copy_from_user(&work, uwork,
-                          sizeof(struct cxl_ioctl_start_work))) {
-               rc = -EFAULT;
-               goto out;
-       }
+       if (copy_from_user(&work, uwork, sizeof(work)))
+               return -EFAULT;
 
        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
index 871a2f09c71845b2803bab920618c36de634e4fc..8d6ea9712dbd1830fcdc5d6eecda3d28d69a9376 100644 (file)
@@ -1302,13 +1302,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
 
 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-       if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+       if (adapter->native->err_virq == 0 ||
+           adapter->native->err_virq !=
+           irq_find_mapping(NULL, adapter->native->err_hwirq))
                return;
 
        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->native->err_virq, adapter);
        cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
        kfree(adapter->irq_name);
+       adapter->native->err_virq = 0;
 }
 
 int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1349,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
 
 void cxl_native_release_serr_irq(struct cxl_afu *afu)
 {
-       if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+       if (afu->serr_virq == 0 ||
+           afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
                return;
 
        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
+       afu->serr_virq = 0;
 }
 
 int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1380,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 
 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-       if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+       if (afu->native->psl_virq == 0 ||
+           afu->native->psl_virq !=
+           irq_find_mapping(NULL, afu->native->psl_hwirq))
                return;
 
        cxl_unmap_irq(afu->native->psl_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
        kfree(afu->psl_irq_name);
+       afu->native->psl_virq = 0;
 }
 
 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
index c862cd4583cc93694747e191f5e3537e5767bfa5..b8069eec18cb44ef335535789f0e2a61ffaf4bd1 100644 (file)
@@ -309,6 +309,9 @@ static inline enum xp_retval
 xpc_send(short partid, int ch_number, u32 flags, void *payload,
         u16 payload_size)
 {
+       if (!xpc_interface.send)
+               return xpNotLoaded;
+
        return xpc_interface.send(partid, ch_number, flags, payload,
                                  payload_size);
 }
@@ -317,6 +320,9 @@ static inline enum xp_retval
 xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
                u16 payload_size, xpc_notify_func func, void *key)
 {
+       if (!xpc_interface.send_notify)
+               return xpNotLoaded;
+
        return xpc_interface.send_notify(partid, ch_number, flags, payload,
                                         payload_size, func, key);
 }
@@ -324,12 +330,16 @@ xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
 static inline void
 xpc_received(short partid, int ch_number, void *payload)
 {
-       return xpc_interface.received(partid, ch_number, payload);
+       if (xpc_interface.received)
+               xpc_interface.received(partid, ch_number, payload);
 }
 
 static inline enum xp_retval
 xpc_partid_to_nasids(short partid, void *nasids)
 {
+       if (!xpc_interface.partid_to_nasids)
+               return xpNotLoaded;
+
        return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
index 01be66d02ca8ce52c84b809fa55a7aeb6b219bc2..6d7f557fd1c1a1e885eb3dad64886b3e0afe32fe 100644 (file)
@@ -69,23 +69,9 @@ struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
 EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
- * Initialize the XPC interface to indicate that XPC isn't loaded.
+ * Initialize the XPC interface to NULL to indicate that XPC isn't loaded.
  */
-static enum xp_retval
-xpc_notloaded(void)
-{
-       return xpNotLoaded;
-}
-
-struct xpc_interface xpc_interface = {
-       (void (*)(int))xpc_notloaded,
-       (void (*)(int))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
-                          void *))xpc_notloaded,
-       (void (*)(short, int, void *))xpc_notloaded,
-       (enum xp_retval(*)(short, void *))xpc_notloaded
-};
+struct xpc_interface xpc_interface = { };
 EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
@@ -115,17 +101,7 @@ EXPORT_SYMBOL_GPL(xpc_set_interface);
 void
 xpc_clear_interface(void)
 {
-       xpc_interface.connect = (void (*)(int))xpc_notloaded;
-       xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
-       xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
-           xpc_notloaded;
-       xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
-                                                      u16, xpc_notify_func,
-                                                      void *))xpc_notloaded;
-       xpc_interface.received = (void (*)(short, int, void *))
-           xpc_notloaded;
-       xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
-           xpc_notloaded;
+       memset(&xpc_interface, 0, sizeof(xpc_interface));
 }
 EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
@@ -188,7 +164,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
        mutex_unlock(&registration->mutex);
 
-       xpc_interface.connect(ch_number);
+       if (xpc_interface.connect)
+               xpc_interface.connect(ch_number);
 
        return xpSuccess;
 }
@@ -237,7 +214,8 @@ xpc_disconnect(int ch_number)
        registration->assigned_limit = 0;
        registration->idle_limit = 0;
 
-       xpc_interface.disconnect(ch_number);
+       if (xpc_interface.disconnect)
+               xpc_interface.disconnect(ch_number);
 
        mutex_unlock(&registration->mutex);
 
index d474378ed810b3c3ab19d8a4e85e77e0eb15511c..b1dd12729f19b29ea8f35886aba02cc986990661 100644 (file)
@@ -202,7 +202,7 @@ static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
        return 0;
 }
 
-const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
        .ecc = nand_ooblayout_ecc_lp_hamming,
        .free = nand_ooblayout_free_lp_hamming,
 };
@@ -4361,7 +4361,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
        /* Initialize the ->data_interface field. */
        ret = nand_init_data_interface(chip);
        if (ret)
-               return ret;
+               goto err_nand_init;
 
        /*
         * Setup the data interface correctly on the chip and controller side.
@@ -4373,7 +4373,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
         */
        ret = nand_setup_data_interface(chip);
        if (ret)
-               return ret;
+               goto err_nand_init;
 
        nand_maf_id = chip->id.data[0];
        nand_dev_id = chip->id.data[1];
@@ -4404,6 +4404,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
        mtd->size = i * chip->chipsize;
 
        return 0;
+
+err_nand_init:
+       /* Free manufacturer priv data. */
+       nand_manufacturer_cleanup(chip);
+
+       return ret;
 }
 EXPORT_SYMBOL(nand_scan_ident);
 
@@ -4574,18 +4580,23 @@ int nand_scan_tail(struct mtd_info *mtd)
 
        /* New bad blocks should be marked in OOB, flash-based BBT, or both */
        if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
-                  !(chip->bbt_options & NAND_BBT_USE_FLASH)))
-               return -EINVAL;
+                  !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
+               ret = -EINVAL;
+               goto err_ident;
+       }
 
        if (invalid_ecc_page_accessors(chip)) {
                pr_err("Invalid ECC page accessors setup\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_ident;
        }
 
        if (!(chip->options & NAND_OWN_BUFFERS)) {
                nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
-               if (!nbuf)
-                       return -ENOMEM;
+               if (!nbuf) {
+                       ret = -ENOMEM;
+                       goto err_ident;
+               }
 
                nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
                if (!nbuf->ecccalc) {
@@ -4608,8 +4619,10 @@ int nand_scan_tail(struct mtd_info *mtd)
 
                chip->buffers = nbuf;
        } else {
-               if (!chip->buffers)
-                       return -ENOMEM;
+               if (!chip->buffers) {
+                       ret = -ENOMEM;
+                       goto err_ident;
+               }
        }
 
        /* Set the internal oob buffer location, just after the page data */
@@ -4842,7 +4855,11 @@ int nand_scan_tail(struct mtd_info *mtd)
                return 0;
 
        /* Build bad block table */
-       return chip->scan_bbt(mtd);
+       ret = chip->scan_bbt(mtd);
+       if (ret)
+               goto err_free;
+       return 0;
+
 err_free:
        if (nbuf) {
                kfree(nbuf->databuf);
@@ -4850,6 +4867,13 @@ err_free:
                kfree(nbuf->ecccalc);
                kfree(nbuf);
        }
+
+err_ident:
+       /* Clean up nand_scan_ident(). */
+
+       /* Free manufacturer priv data. */
+       nand_manufacturer_cleanup(chip);
+
        return ret;
 }
 EXPORT_SYMBOL(nand_scan_tail);
index 9d5ca0e540b5bc5c9e3be9a6a3b476624e63be14..92e2cf8e9ff9066860973caaf60ecb95e2d4a599 100644 (file)
@@ -6,7 +6,6 @@
  * published by the Free Software Foundation.
  *
  */
-#include <linux/module.h>
 #include <linux/mtd/nand.h>
 #include <linux/sizes.h>
 
index 9cfc4035a420a3eae2d2a451c97f5300f8bc8b80..1e0755997762aa23ee953ad20f2a88d24dc7139c 100644 (file)
@@ -84,6 +84,9 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
                        case 7:
                                chip->ecc_strength_ds = 60;
                                break;
+                       default:
+                               WARN(1, "Could not decode ECC info");
+                               chip->ecc_step_ds = 0;
                        }
                }
        } else {
index 05b6e106520331ddd48f619f798d21c4cfeb2059..49b286c6c10fc85e5ee7e75f4dd10d231c86c73f 100644 (file)
  * byte 1 for other packets in the page (PKT_N, for N > 0)
  * ERR_COUNT_PKT_N is the max error count over all but the first packet.
  */
-#define DECODE_OK_PKT_0(v)     ((v) & BIT(7))
-#define DECODE_OK_PKT_N(v)     ((v) & BIT(15))
 #define ERR_COUNT_PKT_0(v)     (((v) >> 0) & 0x3f)
 #define ERR_COUNT_PKT_N(v)     (((v) >> 8) & 0x3f)
+#define DECODE_FAIL_PKT_0(v)   (((v) & BIT(7)) == 0)
+#define DECODE_FAIL_PKT_N(v)   (((v) & BIT(15)) == 0)
 
 /* Offsets relative to pbus_base */
 #define PBUS_CS_CTRL   0x83c
@@ -193,6 +193,8 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
                                                  chip->ecc.strength);
                if (res < 0)
                        mtd->ecc_stats.failed++;
+               else
+                       mtd->ecc_stats.corrected += res;
 
                bitflips = max(res, bitflips);
                buf += pkt_size;
@@ -202,9 +204,11 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
        return bitflips;
 }
 
-static int decode_error_report(struct tango_nfc *nfc)
+static int decode_error_report(struct nand_chip *chip)
 {
        u32 status, res;
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       struct tango_nfc *nfc = to_tango_nfc(chip->controller);
 
        status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
        if (status & PAGE_IS_EMPTY)
@@ -212,10 +216,14 @@ static int decode_error_report(struct tango_nfc *nfc)
 
        res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
 
-       if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
-               return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+       if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
+               return -EBADMSG;
+
+       /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */
+       mtd->ecc_stats.corrected +=
+               ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
 
-       return -EBADMSG;
+       return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
 }
 
 static void tango_dma_callback(void *arg)
@@ -282,7 +290,7 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
        if (err)
                return err;
 
-       res = decode_error_report(nfc);
+       res = decode_error_report(chip);
        if (res < 0) {
                chip->ecc.read_oob_raw(mtd, chip, page);
                res = check_erased_page(chip, buf);
@@ -663,6 +671,7 @@ static const struct of_device_id tango_nand_ids[] = {
        { .compatible = "sigma,smp8758-nand" },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, tango_nand_ids);
 
 static struct platform_driver tango_nand_driver = {
        .probe  = tango_nand_probe,
index 96046bb12ca17333530f237fddb46438c3298dea..14c0be98e0a4d449aa4122c2db6e9ef6af007c84 100644 (file)
@@ -114,13 +114,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
        return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
-                          int src_port, u16 data)
+static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
+                                        int src_dev, int src_port, u16 data)
 {
        return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
 {
        return -EOPNOTSUPP;
 }
index b3bc87fe3764e397e4a9ce19518866cb97530771..0a98c369df2045ccbb9fbf7a55af848530a5f464 100644 (file)
@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
                              struct xgbe_ring *ring,
                              struct xgbe_ring_data *rdata)
 {
-       int order, ret;
+       int ret;
 
        if (!ring->rx_hdr_pa.pages) {
                ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
        }
 
        if (!ring->rx_buf_pa.pages) {
-               order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
                ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
-                                      order);
+                                      PAGE_ALLOC_COSTLY_ORDER);
                if (ret)
                        return ret;
        }
index 099b374c1b17bbd8e9cabe68cdc7cd991a258737..5274501428e4fb05850bada0d4ff3cd8a346f59f 100644 (file)
@@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        priv->num_rx_desc_words = params->num_rx_desc_words;
 
        priv->irq0 = platform_get_irq(pdev, 0);
-       if (!priv->is_lite)
+       if (!priv->is_lite) {
                priv->irq1 = platform_get_irq(pdev, 1);
-       priv->wol_irq = platform_get_irq(pdev, 2);
+               priv->wol_irq = platform_get_irq(pdev, 2);
+       } else {
+               priv->wol_irq = platform_get_irq(pdev, 1);
+       }
        if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
                dev_err(&pdev->dev, "invalid interrupts\n");
                ret = -EINVAL;
index eccb3d1b6abb748c14567d0efcdc28a405b64fb5..5f49334dcad5a8c8602cc3aa2e8795b2d489bb43 100644 (file)
@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
        }
 
        /* select a non-FCoE queue */
-       return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+       return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
index 38a5c6764bb50f45124c212db37e11d2cc777076..77ed2f628f9ca23854ae8b062ff919ce6d2e3425 100644 (file)
@@ -2196,10 +2196,14 @@ static int cxgb_up(struct adapter *adap)
                if (err)
                        goto irq_err;
        }
+
+       mutex_lock(&uld_mutex);
        enable_rx(adap);
        t4_sge_start(adap);
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;
+       mutex_unlock(&uld_mutex);
+
        notify_ulds(adap, CXGB4_STATE_UP);
 #if IS_ENABLED(CONFIG_IPV6)
        update_clip(adap);
@@ -2771,6 +2775,9 @@ void t4_fatal_err(struct adapter *adap)
 {
        int port;
 
+       if (pci_channel_offline(adap->pdev))
+               return;
+
        /* Disable the SGE since ULDs are going to free resources that
         * could be exposed to the adapter.  RDMA MWs for example...
         */
@@ -3882,9 +3889,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
        spin_lock(&adap->stats_lock);
        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
-
-               netif_device_detach(dev);
-               netif_carrier_off(dev);
+               if (dev) {
+                       netif_device_detach(dev);
+                       netif_carrier_off(dev);
+               }
        }
        spin_unlock(&adap->stats_lock);
        disable_interrupts(adap);
@@ -3963,12 +3971,13 @@ static void eeh_resume(struct pci_dev *pdev)
        rtnl_lock();
        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
-
-               if (netif_running(dev)) {
-                       link_start(dev);
-                       cxgb_set_rxmode(dev);
+               if (dev) {
+                       if (netif_running(dev)) {
+                               link_start(dev);
+                               cxgb_set_rxmode(dev);
+                       }
+                       netif_device_attach(dev);
                }
-               netif_device_attach(dev);
        }
        rtnl_unlock();
 }
index aded42b96f6d966ba7e814c0a89c738968a655b6..3a34aa629f7dd81a56e6b5c1c63bfdeb685c3f25 100644 (file)
@@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter)
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-       u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
-       u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+       u32 whoami, pf;
+
+       if (pci_channel_offline(adapter->pdev))
+               return;
+
+       whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+       pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
                        SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
index 3549d387627888a2629b5f07dd1b001d2db1fc70..f2d623a7aee04e21f1e4e52645d66788a59341ab 100644 (file)
@@ -37,7 +37,7 @@
 
 #define T4FW_VERSION_MAJOR 0x01
 #define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2B
+#define T4FW_VERSION_MICRO 0x2D
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
 
 #define T5FW_VERSION_MAJOR 0x01
 #define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2B
+#define T5FW_VERSION_MICRO 0x2D
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
 
 #define T6FW_VERSION_MAJOR 0x01
 #define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2B
+#define T6FW_VERSION_MICRO 0x2D
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
index e863ba74d005d7f255931b336825df2abadd2fc8..8bb0db990c8fcf8258201f1af5fcf3fa9976b5f9 100644 (file)
@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev)
        if (ret)
                return ret;
 
+       napi_enable(&priv->napi);
+
        ethoc_init_ring(priv, dev->mem_start);
        ethoc_reset(priv);
 
@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev)
        priv->old_duplex = -1;
 
        phy_start(dev->phydev);
-       napi_enable(&priv->napi);
 
        if (netif_msg_ifup(priv)) {
                dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
index 446c7b374ff5c36712d5813cf94b4e9b4ca0b01e..a10de1e9c157d2590eb19122f27fd5dda1a4816b 100644 (file)
@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
        const struct of_device_id *id =
                of_match_device(fsl_pq_mdio_match, &pdev->dev);
-       const struct fsl_pq_mdio_data *data = id->data;
+       const struct fsl_pq_mdio_data *data;
        struct device_node *np = pdev->dev.of_node;
        struct resource res;
        struct device_node *tbi;
@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
        struct mii_bus *new_bus;
        int err;
 
+       if (!id) {
+               dev_err(&pdev->dev, "Failed to match device\n");
+               return -ENODEV;
+       }
+
+       data = id->data;
+
        dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
        new_bus = mdiobus_alloc_size(sizeof(*priv));
index 4f2d329dba998308eeb2ddaed52733cb3059f605..a93757c255f77445e2245ee8b065d2c6ee31cf3f 100644 (file)
@@ -81,7 +81,7 @@
 static const char ibmvnic_driver_name[] = "ibmvnic";
 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
 
-MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
+MODULE_AUTHOR("Santiago Leon");
 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
index d5c9c9e06ff57e21c1e28d09b9eea18af6e22f1a..150caf6ca2b4bb1da5e0ea63f37fd086c058efad 100644 (file)
@@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
  **/
 void i40e_service_event_schedule(struct i40e_pf *pf)
 {
-       if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
+       if (!test_bit(__I40E_DOWN, pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
                queue_work(i40e_wq, &pf->service_task);
 }
@@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
                 * this is not a performance path and napi_schedule()
                 * can deal with rescheduling.
                 */
-               if (!test_bit(__I40E_VSI_DOWN, pf->state))
+               if (!test_bit(__I40E_DOWN, pf->state))
                        napi_schedule_irqoff(&q_vector->napi);
        }
 
@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 enable_intr:
        /* re-enable interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-       if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
+       if (!test_bit(__I40E_DOWN, pf->state)) {
                i40e_service_event_schedule(pf);
                i40e_irq_dynamic_enable_icr0(pf, false);
        }
@@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
 {
 
        /* if interface is down do nothing */
-       if (test_bit(__I40E_VSI_DOWN, pf->state))
+       if (test_bit(__I40E_DOWN, pf->state))
                return;
 
        if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
@@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
        int i;
 
        /* if interface is down do nothing */
-       if (test_bit(__I40E_VSI_DOWN, pf->state) ||
+       if (test_bit(__I40E_DOWN, pf->state) ||
            test_bit(__I40E_CONFIG_BUSY, pf->state))
                return;
 
@@ -6399,9 +6399,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
                reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
        }
-       if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
-               reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
-               clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
+       if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
+               reset_flags |= BIT(__I40E_DOWN_REQUESTED);
+               clear_bit(__I40E_DOWN_REQUESTED, pf->state);
        }
 
        /* If there's a recovery already waiting, it takes
@@ -6415,7 +6415,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 
        /* If we're already down or resetting, just bail */
        if (reset_flags &&
-           !test_bit(__I40E_VSI_DOWN, pf->state) &&
+           !test_bit(__I40E_DOWN, pf->state) &&
            !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
                rtnl_lock();
                i40e_do_reset(pf, reset_flags, true);
@@ -7002,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        u32 val;
        int v;
 
-       if (test_bit(__I40E_VSI_DOWN, pf->state))
+       if (test_bit(__I40E_DOWN, pf->state))
                goto clear_recovery;
        dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
 
@@ -9767,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                return -ENODEV;
        }
        if (vsi == pf->vsi[pf->lan_vsi] &&
-           !test_bit(__I40E_VSI_DOWN, pf->state)) {
+           !test_bit(__I40E_DOWN, pf->state)) {
                dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
                return -ENODEV;
        }
@@ -11003,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        pf->next_vsi = 0;
        pf->pdev = pdev;
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
 
        hw = &pf->hw;
        hw->back = pf;
@@ -11293,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * before setting up the misc vector or we get a race and the vector
         * ends up disabled forever.
         */
-       clear_bit(__I40E_VSI_DOWN, pf->state);
+       clear_bit(__I40E_DOWN, pf->state);
 
        /* In case of MSIX we are going to setup the misc vector right here
         * to handle admin queue events etc. In case of legacy and MSI
@@ -11448,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* Unwind what we've done if something failed in the setup */
 err_vsis:
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
        i40e_clear_interrupt_scheme(pf);
        kfree(pf->vsi);
 err_switch_setup:
@@ -11500,7 +11500,7 @@ static void i40e_remove(struct pci_dev *pdev)
 
        /* no more scheduling of any task */
        set_bit(__I40E_SUSPENDED, pf->state);
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
        if (pf->service_timer.data)
                del_timer_sync(&pf->service_timer);
        if (pf->service_task.func)
@@ -11740,7 +11740,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
        struct i40e_hw *hw = &pf->hw;
 
        set_bit(__I40E_SUSPENDED, pf->state);
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
        rtnl_lock();
        i40e_prep_for_reset(pf, true);
        rtnl_unlock();
@@ -11789,7 +11789,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
        int retval = 0;
 
        set_bit(__I40E_SUSPENDED, pf->state);
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
 
        if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
                i40e_enable_mc_magic_wake(pf);
@@ -11841,7 +11841,7 @@ static int i40e_resume(struct pci_dev *pdev)
 
        /* handling the reset will rebuild the device state */
        if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
-               clear_bit(__I40E_VSI_DOWN, pf->state);
+               clear_bit(__I40E_DOWN, pf->state);
                rtnl_lock();
                i40e_reset_and_rebuild(pf, false, true);
                rtnl_unlock();
index 29321a6167a6675757e74ab4e3cd1a100cb423b0..cd894f4023b1b68cc4e202ff7064e63e2f8be031 100644 (file)
@@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = SKB_DATA_ALIGN(size);
+       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+                               SKB_DATA_ALIGN(I40E_SKB_PAD + size);
 #endif
        struct sk_buff *skb;
 
index dfe241a12ad0756d10a77b954486d2194a31c5ca..12b02e5305038d55fcee5d2109b4ab534a09b1bb 100644 (file)
@@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = SKB_DATA_ALIGN(size);
+       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+                               SKB_DATA_ALIGN(I40E_SKB_PAD + size);
 #endif
        struct sk_buff *skb;
 
index ae5fdc2df65412afce4e72b8384c9c4b3302c56e..ffbcb27c05e55f43630a812249bab21609886dd9 100644 (file)
@@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev,
                qpn = priv->drop_qp.qpn;
        else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
                qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
-               if (qpn < priv->rss_map.base_qpn ||
-                   qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
-                       en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
-                       return -EINVAL;
-               }
        } else {
                if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
                        en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
index 1a670b68155550fe9f61fb2bc2c7a3688391249c..0710b367746468f1d4faeb5b8a8f3266ca941674 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/etherdevice.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 #include <linux/export.h>
 
 #include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
 
+       if (!mlx4_qp_lookup(dev, rule->qpn)) {
+               mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+               ret = -EINVAL;
+               goto out;
+       }
+
        trans_rule_ctrl_to_hw(rule, mailbox->buf);
 
        size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
 
        list_for_each_entry(cur, &rule->list, list) {
                ret = parse_trans_rule(dev, cur, mailbox->buf + size);
-               if (ret < 0) {
-                       mlx4_free_cmd_mailbox(dev, mailbox);
-                       return ret;
-               }
+               if (ret < 0)
+                       goto out;
+
                size += ret;
        }
 
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
                }
        }
 
+out:
        mlx4_free_cmd_mailbox(dev, mailbox);
 
        return ret;
index 2d6abd4662b143612769ef9b91783249bcd2ac8b..5a310d313e94d08d035c265e6b538c27dcf957c3 100644 (file)
@@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
                __mlx4_qp_free_icm(dev, qpn);
 }
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+       struct mlx4_qp *qp;
+
+       spin_lock(&qp_table->lock);
+
+       qp = __mlx4_qp_lookup(dev, qpn);
+
+       spin_unlock(&qp_table->lock);
+       return qp;
+}
+
 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
        }
 
        if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+               if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
+                       mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
+                       err = -EOPNOTSUPP;
+                       goto out;
+               }
+
                qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
                cmd->qp_context.qos_vport = params->qos_vport;
        }
index 07516545474f3ac76e750aaa4af2532b6ac81207..812783865205715e8e88ad66d4ccbfe7172ec6e5 100644 (file)
@@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 }
 
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+                          struct mlx4_vf_immed_vlan_work *work)
+{
+       ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+       ctx->qp_context.qos_vport = work->qos_vport;
+}
+
 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 {
        struct mlx4_vf_immed_vlan_work *work =
@@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                                        qp->sched_queue & 0xC7;
                                upd_context->qp_context.pri_path.sched_queue |=
                                        ((work->qos & 0x7) << 3);
-                               upd_context->qp_mask |=
-                                       cpu_to_be64(1ULL <<
-                                                   MLX4_UPD_QP_MASK_QOS_VPP);
-                               upd_context->qp_context.qos_vport =
-                                       work->qos_vport;
+
+                               if (dev->caps.flags2 &
+                                   MLX4_DEV_CAP_FLAG2_QOS_VPP)
+                                       update_qos_vpp(upd_context, work);
                        }
 
                        err = mlx4_cmd(dev, mailbox->dma,
index fe5546bb41537f0af0c4bcfe9054ccceaa42bbb2..af945edfee1905dbe676218cb53123535a37171f 100644 (file)
@@ -621,10 +621,9 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
                        priv->irq_info[i].mask);
 
-#ifdef CONFIG_SMP
-       if (irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+       if (IS_ENABLED(CONFIG_SMP) &&
+           irq_set_affinity_hint(irq, priv->irq_info[i].mask))
                mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
-#endif
 
        return 0;
 }
index 537d1236a4fec0a2973d52ed34cf7f73d8a4b052..715b3aaf83ac4d65cdea4eb15f6f2089e402c469 100644 (file)
@@ -1730,7 +1730,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
                qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
                break;
        default:
-               DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+               DP_VERBOSE(cdev, QED_MSG_SP,
+                          "Invalid protocol type = %d\n", type);
                return;
        }
 }
index 7245b1072518fff31566c471b6eb32b512e41846..81312924df1407092fd1dd43cc0555d16976160b 100644 (file)
@@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops {
        u32 (*get_cap_size)(void *, int);
        void (*set_sys_info)(void *, int, u32);
        void (*store_cap_mask)(void *, u32);
+       bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
+       bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
 };
 
 extern struct qlcnic_nic_template qlcnic_vf_ops;
 
-static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
 {
        return adapter->ahw->extra_capability[0] &
               QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
 }
 
-static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
 {
        return adapter->ahw->extra_capability[0] &
               QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
 }
 
+static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+       return false;
+}
+
+static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+        return false;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+        return adapter->ahw->hw_ops->encap_rx_offload(adapter);
+}
+
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+        return adapter->ahw->hw_ops->encap_tx_offload(adapter);
+}
+
 static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
 {
        return adapter->nic_ops->start_firmware(adapter);
index 4fb68797630e9531e7ffc4e7bc7c015313a44063..f7080d0ab8746263c6955163640964eac2fd2cb0 100644 (file)
@@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
        .get_cap_size                   = qlcnic_83xx_get_cap_size,
        .set_sys_info                   = qlcnic_83xx_set_sys_info,
        .store_cap_mask                 = qlcnic_83xx_store_cap_mask,
+       .encap_rx_offload               = qlcnic_83xx_encap_rx_offload,
+       .encap_tx_offload               = qlcnic_83xx_encap_tx_offload,
 };
 
 static struct qlcnic_nic_template qlcnic_83xx_ops = {
index 838cc0ceafd8d0824495206bf30cdaf53f98bc59..7848cf04b29a83f0a356b3eb5360d6a6e65871f0 100644 (file)
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
                        }
                        return -EIO;
                }
-               usleep_range(1000, 1500);
+               udelay(1200);
        }
 
        if (id_reg)
index b6628aaa6e4a45a8eaecd9d5fc4ea8136d7a07af..1b5f7d57b6f8fed6a8b232adfdee76b2cbaff13f 100644 (file)
@@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
        .get_cap_size                   = qlcnic_82xx_get_cap_size,
        .set_sys_info                   = qlcnic_82xx_set_sys_info,
        .store_cap_mask                 = qlcnic_82xx_store_cap_mask,
+       .encap_rx_offload               = qlcnic_82xx_encap_rx_offload,
+       .encap_tx_offload               = qlcnic_82xx_encap_tx_offload,
 };
 
 static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
index 2f656f395f39699e4cec53f4ff25ea7e745d1041..c58180f408448e9a86a7ce40c6fb286063a6afb0 100644 (file)
@@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
        .free_mac_list                  = qlcnic_sriov_vf_free_mac_list,
        .enable_sds_intr                = qlcnic_83xx_enable_sds_intr,
        .disable_sds_intr               = qlcnic_83xx_disable_sds_intr,
+       .encap_rx_offload               = qlcnic_83xx_encap_rx_offload,
+       .encap_tx_offload               = qlcnic_83xx_encap_tx_offload,
 };
 
 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
index cc065ffbe4b5584a6498237d1e4a929ff1d6ebd0..bcd4708b374574fb06faf28d9b0a6cc90bc9c56d 100644 (file)
@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt)
        emac_mac_config(adpt);
        emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
 
-       adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
+       adpt->phydev->irq = PHY_POLL;
        ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
                                 PHY_INTERFACE_MODE_SGMII);
        if (ret) {
index 441c1936648993fa394e5c676b7bd9957e52efa3..18461fcb981501efd7015634999cb787041c01a7 100644 (file)
 /* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
  */
 
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_net.h>
 #include <linux/of_mdio.h>
 #include <linux/phy.h>
 #include <linux/iopoll.h>
 #include <linux/acpi.h>
 #include "emac.h"
-#include "emac-mac.h"
 
 /* EMAC base register offsets */
 #define EMAC_MDIO_CTRL                                        0x001414
 
 #define MDIO_WAIT_TIMES                                           1000
 
-#define EMAC_LINK_SPEED_DEFAULT (\
-               EMAC_LINK_SPEED_10_HALF  |\
-               EMAC_LINK_SPEED_10_FULL  |\
-               EMAC_LINK_SPEED_100_HALF |\
-               EMAC_LINK_SPEED_100_FULL |\
-               EMAC_LINK_SPEED_1GB_FULL)
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The autopoll feature takes over the MDIO bus.  In order for
- * the PHY driver to be able to talk to the PHY over the MDIO
- * bus, we need to temporarily disable the autopoll feature.
- */
-static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
-{
-       u32 val;
-
-       /* disable autopoll */
-       emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
-
-       /* wait for any mdio polling to complete */
-       if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
-                               !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
-               return 0;
-
-       /* failed to disable; ensure it is enabled before returning */
-       emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-
-       return -EBUSY;
-}
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The EMAC has the ability to poll the external PHY on the MDIO
- * bus for link state changes.  This eliminates the need for the
- * driver to poll the phy.  If if the link state does change,
- * the EMAC issues an interrupt on behalf of the PHY.
- */
-static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
-{
-       emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-}
-
 static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 {
        struct emac_adapter *adpt = bus->priv;
        u32 reg;
-       int ret;
-
-       ret = emac_phy_mdio_autopoll_disable(adpt);
-       if (ret)
-               return ret;
 
        emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
                          (addr << PHY_ADDR_SHFT));
@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                               !(reg & (MDIO_START | MDIO_BUSY)),
                               100, MDIO_WAIT_TIMES * 100))
-               ret = -EIO;
-       else
-               ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
+               return -EIO;
 
-       emac_phy_mdio_autopoll_enable(adpt);
-
-       return ret;
+       return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
 }
 
 static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
 {
        struct emac_adapter *adpt = bus->priv;
        u32 reg;
-       int ret;
-
-       ret = emac_phy_mdio_autopoll_disable(adpt);
-       if (ret)
-               return ret;
 
        emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
                          (addr << PHY_ADDR_SHFT));
@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                               !(reg & (MDIO_START | MDIO_BUSY)), 100,
                               MDIO_WAIT_TIMES * 100))
-               ret = -EIO;
+               return -EIO;
 
-       emac_phy_mdio_autopoll_enable(adpt);
-
-       return ret;
+       return 0;
 }
 
 /* Configure the MDIO bus and connect the external PHY */
index 28a8cdc364851e56a5757a8f2970853c0a462cc4..98a326faea294eec0c59f9b0ffe15d57046ce5eb 100644 (file)
 #define DMAR_DLY_CNT_DEF                                   15
 #define DMAW_DLY_CNT_DEF                                    4
 
-#define IMR_NORMAL_MASK         (\
-               ISR_ERROR       |\
-               ISR_GPHY_LINK   |\
-               ISR_TX_PKT      |\
-               GPHY_WAKEUP_INT)
-
-#define IMR_EXTENDED_MASK       (\
-               SW_MAN_INT      |\
-               ISR_OVER        |\
-               ISR_ERROR       |\
-               ISR_GPHY_LINK   |\
-               ISR_TX_PKT      |\
-               GPHY_WAKEUP_INT)
+#define IMR_NORMAL_MASK                (ISR_ERROR | ISR_OVER | ISR_TX_PKT)
 
 #define ISR_TX_PKT      (\
        TX_PKT_INT      |\
        TX_PKT_INT2     |\
        TX_PKT_INT3)
 
-#define ISR_GPHY_LINK        (\
-       GPHY_LINK_UP_INT     |\
-       GPHY_LINK_DOWN_INT)
-
 #define ISR_OVER        (\
        RFD0_UR_INT     |\
        RFD1_UR_INT     |\
@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data)
        if (status & ISR_OVER)
                net_warn_ratelimited("warning: TX/RX overflow\n");
 
-       /* link event */
-       if (status & ISR_GPHY_LINK)
-               phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
-
 exit:
        /* enable the interrupt */
        writel(irq->mask, adpt->base + EMAC_INT_MASK);
index 3cd7989c007dfe46947e2ddb366a904f1af90198..784782da3a85b638e9e2a195fe66b15c72fe0fe5 100644 (file)
@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        int ring_size;
        int i;
 
-       /* Free RX skb ringbuffer */
-       if (priv->rx_skb[q]) {
-               for (i = 0; i < priv->num_rx_ring[q]; i++)
-                       dev_kfree_skb(priv->rx_skb[q][i]);
-       }
-       kfree(priv->rx_skb[q]);
-       priv->rx_skb[q] = NULL;
-
-       /* Free aligned TX buffers */
-       kfree(priv->tx_align[q]);
-       priv->tx_align[q] = NULL;
-
        if (priv->rx_ring[q]) {
                for (i = 0; i < priv->num_rx_ring[q]; i++) {
                        struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
                priv->tx_ring[q] = NULL;
        }
 
+       /* Free RX skb ringbuffer */
+       if (priv->rx_skb[q]) {
+               for (i = 0; i < priv->num_rx_ring[q]; i++)
+                       dev_kfree_skb(priv->rx_skb[q][i]);
+       }
+       kfree(priv->rx_skb[q]);
+       priv->rx_skb[q] = NULL;
+
+       /* Free aligned TX buffers */
+       kfree(priv->tx_align[q]);
+       priv->tx_align[q] = NULL;
+
        /* Free TX skb ringbuffer.
         * SKBs are freed by ravb_tx_free() call above.
         */
index 489ef146201e61c629c17010f672a621642e94b3..6a9c954492f225987d5dc63713034548e7aabdbb 100644 (file)
@@ -37,6 +37,7 @@
 #define TSE_PCS_CONTROL_AN_EN_MASK                     BIT(12)
 #define TSE_PCS_CONTROL_REG                            0x00
 #define TSE_PCS_CONTROL_RESTART_AN_MASK                        BIT(9)
+#define TSE_PCS_CTRL_AUTONEG_SGMII                     0x1140
 #define TSE_PCS_IF_MODE_REG                            0x28
 #define TSE_PCS_LINK_TIMER_0_REG                       0x24
 #define TSE_PCS_LINK_TIMER_1_REG                       0x26
@@ -65,6 +66,7 @@
 #define TSE_PCS_SW_RESET_TIMEOUT                       100
 #define TSE_PCS_USE_SGMII_AN_MASK                      BIT(1)
 #define TSE_PCS_USE_SGMII_ENA                          BIT(0)
+#define TSE_PCS_IF_USE_SGMII                           0x03
 
 #define SGMII_ADAPTER_CTRL_REG                         0x00
 #define SGMII_ADAPTER_DISABLE                          0x0001
@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
 {
        int ret = 0;
 
-       writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
+       writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
+
+       writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
 
        writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
        writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
index a74c481401c46ee659b1244b0124966cabeafdbd..12236daf7bb6d5358fdafe50e37227e19b95bc33 100644 (file)
@@ -1208,7 +1208,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
        u32 rx_count = priv->plat->rx_queues_to_use;
        unsigned int bfsize = 0;
        int ret = -ENOMEM;
-       u32 queue;
+       int queue;
        int i;
 
        if (priv->hw->mode->set_16kib_bfsize)
@@ -2724,7 +2724,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
 
                priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
                        0, 1,
-                       (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+                       (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
                        0, 0);
 
                tmp_len -= TSO_MAX_BUFF_SIZE;
@@ -2947,7 +2947,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        int i, csum_insertion = 0, is_jumbo = 0;
        u32 queue = skb_get_queue_mapping(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
-       unsigned int entry, first_entry;
+       int entry;
+       unsigned int first_entry;
        struct dma_desc *desc, *first;
        struct stmmac_tx_queue *tx_q;
        unsigned int enh_desc;
index 959fd12d2e670dfa52d7d9d11f835e990c82aa7c..6ebb0f559a427fdb4d27d9b668b46d7151650043 100644 (file)
@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
 
        /* make enough headroom for basic scenario */
        encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
-       if (ip_tunnel_info_af(info) == AF_INET) {
+       if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
                encap_len += sizeof(struct iphdr);
                dev->max_mtu -= sizeof(struct iphdr);
        } else {
index 8c3633c1d0789718fc528b9873e8295e6ab6b09e..97e3bc60c3e7d111184f4c60ce4afe50757d1398 100644 (file)
@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case HDLCDRVCTL_CALIBRATE:
                if(!capable(CAP_SYS_RAWIO))
                        return -EPERM;
+               if (s->par.bitrate <= 0)
+                       return -EINVAL;
                if (bi.data.calibrate > INT_MAX / s->par.bitrate)
                        return -EINVAL;
                s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
index 9097e42bec2e42d8ee864edea4a6be5d8c0a92cc..57297ba239871c631baef1dcfc2c30a66fdf3bba 100644 (file)
@@ -1127,8 +1127,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
                if (adv < 0)
                        return adv;
 
-               lpa &= adv;
-
                if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
                        phydev->duplex = DUPLEX_FULL;
                else
index 8e73f5f36e7120a5aa28b6e0dfb992eca3330e3e..f99c21f78b639fc1e6b984a20383f62021b757eb 100644 (file)
@@ -658,6 +658,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
        return 0;
 }
 
+static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       int rc;
+
+       /* Some devices have extra OF data and an OF-style MODALIAS */
+       rc = of_device_uevent_modalias(dev, env);
+       if (rc != -ENODEV)
+               return rc;
+
+       return 0;
+}
+
 #ifdef CONFIG_PM
 static int mdio_bus_suspend(struct device *dev)
 {
@@ -708,6 +720,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = {
 struct bus_type mdio_bus_type = {
        .name           = "mdio_bus",
        .match          = mdio_bus_match,
+       .uevent         = mdio_uevent,
        .pm             = MDIO_BUS_PM_OPS,
 };
 EXPORT_SYMBOL(mdio_bus_type);
index 6a5fd18f062c4ea400bc8036d787a94fe5108a34..b9252b8d81ffb720272ca5f0b25910c021eb28a3 100644 (file)
@@ -268,23 +268,12 @@ out:
        return ret;
 }
 
-static int kszphy_config_init(struct phy_device *phydev)
+/* Some config bits need to be set again on resume, handle them here. */
+static int kszphy_config_reset(struct phy_device *phydev)
 {
        struct kszphy_priv *priv = phydev->priv;
-       const struct kszphy_type *type;
        int ret;
 
-       if (!priv)
-               return 0;
-
-       type = priv->type;
-
-       if (type->has_broadcast_disable)
-               kszphy_broadcast_disable(phydev);
-
-       if (type->has_nand_tree_disable)
-               kszphy_nand_tree_disable(phydev);
-
        if (priv->rmii_ref_clk_sel) {
                ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
                if (ret) {
@@ -295,11 +284,30 @@ static int kszphy_config_init(struct phy_device *phydev)
        }
 
        if (priv->led_mode >= 0)
-               kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
+               kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
 
        return 0;
 }
 
+static int kszphy_config_init(struct phy_device *phydev)
+{
+       struct kszphy_priv *priv = phydev->priv;
+       const struct kszphy_type *type;
+
+       if (!priv)
+               return 0;
+
+       type = priv->type;
+
+       if (type->has_broadcast_disable)
+               kszphy_broadcast_disable(phydev);
+
+       if (type->has_nand_tree_disable)
+               kszphy_nand_tree_disable(phydev);
+
+       return kszphy_config_reset(phydev);
+}
+
 static int ksz8041_config_init(struct phy_device *phydev)
 {
        struct device_node *of_node = phydev->mdio.dev.of_node;
@@ -700,8 +708,14 @@ static int kszphy_suspend(struct phy_device *phydev)
 
 static int kszphy_resume(struct phy_device *phydev)
 {
+       int ret;
+
        genphy_resume(phydev);
 
+       ret = kszphy_config_reset(phydev);
+       if (ret)
+               return ret;
+
        /* Enable PHY Interrupts */
        if (phy_interrupt_is_valid(phydev)) {
                phydev->interrupts = PHY_INTERRUPT_ENABLED;
index 82ab8fb82587553fefc1d05bbff06d4a76bc9679..7524caa0f29d9806e11826c7ecfd57842bff1822 100644 (file)
@@ -241,7 +241,7 @@ static const struct phy_setting settings[] = {
  * phy_lookup_setting - lookup a PHY setting
  * @speed: speed to match
  * @duplex: duplex to match
- * @feature: allowed link modes
+ * @features: allowed link modes
  * @exact: an exact match is required
  *
  * Search the settings array for a setting that matches the speed and
index 3e9246cc49c3784ebc045868a53318d35bf01075..a871f45ecc79a438b2b43465d3719f240ff25cb5 100644 (file)
@@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
        unsigned int len;
 
        len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
-                               rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
+                               rq->min_buf_len, PAGE_SIZE - hdr_len);
        return ALIGN(len, L1_CACHE_BYTES);
 }
 
@@ -2144,7 +2144,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
        unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
        unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
 
-       return max(min_buf_len, hdr_len);
+       return max(max(min_buf_len, hdr_len) - hdr_len,
+                  (unsigned int)GOOD_PACKET_LEN);
 }
 
 static int virtnet_find_vqs(struct virtnet_info *vi)
index 328b4712683c334bf1de66a3a3789d0a04d734c3..a6b5052c1d36bb99260dd4232842fa9e8df2621c 100644 (file)
@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
 
 static int vxlan_sock_add(struct vxlan_dev *vxlan);
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
        struct list_head  vxlan_list;
@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
        call_rcu(&f->rcu, vxlan_fdb_free);
 }
 
+static void vxlan_dst_free(struct rcu_head *head)
+{
+       struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
+
+       dst_cache_destroy(&rd->dst_cache);
+       kfree(rd);
+}
+
+static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+                                 struct vxlan_rdst *rd)
+{
+       list_del_rcu(&rd->list);
+       vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+       call_rcu(&rd->rcu, vxlan_dst_free);
+}
+
 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
                           union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
                           __be32 *vni, u32 *ifindex)
@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
         * otherwise destroy the fdb entry
         */
        if (rd && !list_is_singular(&f->remotes)) {
-               list_del_rcu(&rd->list);
-               vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
-               kfree_rcu(rd, rcu);
+               vxlan_fdb_dst_destroy(vxlan, f, rd);
                goto out;
        }
 
@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
        rcu_assign_pointer(vxlan->vn4_sock, NULL);
        synchronize_net();
 
+       vxlan_vs_del_dev(vxlan);
+
        if (__vxlan_sock_release_prep(sock4)) {
                udp_tunnel_sock_release(sock4->sock);
                kfree(sock4);
@@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg)
        mod_timer(&vxlan->age_timer, next_timer);
 }
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+       spin_lock(&vn->sock_lock);
+       hlist_del_init_rcu(&vxlan->hlist);
+       spin_unlock(&vn->sock_lock);
+}
+
 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 {
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
        vxlan_flush(vxlan, true);
 
-       spin_lock(&vn->sock_lock);
-       if (!hlist_unhashed(&vxlan->hlist))
-               hlist_del_rcu(&vxlan->hlist);
-       spin_unlock(&vn->sock_lock);
-
        gro_cells_destroy(&vxlan->gro_cells);
        list_del(&vxlan->next);
        unregister_netdevice_queue(dev, head);
index d5e993dc9b238c6fd306b801083346570eec8790..517a315e259b79f05f2d8d3e88c371a9f39ad9e9 100644 (file)
@@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
        qcom_smem_state_put(wcn->tx_enable_state);
        qcom_smem_state_put(wcn->tx_rings_empty_state);
 
+       rpmsg_destroy_ept(wcn->smd_channel);
+
        iounmap(wcn->dxe_base);
        iounmap(wcn->ccu_base);
 
index fc64b8913aa6a11c0111fec3b9d900174dc250c3..e03450059b06c0bfe510148f985c19668bcd3dff 100644 (file)
@@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
                /* otherwise, set txglomalign */
                value = sdiodev->settings->bus.sdio.sd_sgentry_align;
                /* SDIO ADMA requires at least 32 bit alignment */
-               value = max_t(u32, value, 4);
+               value = max_t(u32, value, ALIGNMENT);
                err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
                                           sizeof(u32));
        }
index 3b3e076571d6d7089ae2df09d2dfbd2008cf81e4..45e2efc70d19e5f44c7a5e2a1cde2cf9a7448f91 100644 (file)
@@ -79,8 +79,8 @@
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  17
 #define IWL7265_UCODE_API_MIN  17
-#define IWL7265D_UCODE_API_MIN 17
-#define IWL3168_UCODE_API_MIN  20
+#define IWL7265D_UCODE_API_MIN 22
+#define IWL3168_UCODE_API_MIN  22
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
index b9718c0cf17480dc4c1ab212fdbfabecb3125775..89137717c1fce778d63a55af99f16f970f3b34d6 100644 (file)
@@ -74,8 +74,8 @@
 #define IWL8265_UCODE_API_MAX  30
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  17
-#define IWL8265_UCODE_API_MIN  20
+#define IWL8000_UCODE_API_MIN  22
+#define IWL8265_UCODE_API_MIN  22
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION            0x0a1d
index 306bc967742ee9a7b5629bab00f20cab77309fa1..77efbb78e867bfac47ae992c2703275e1b5062b2 100644 (file)
 #define MON_DMARB_RD_DATA_ADDR         (0xa03c5c)
 
 #define DBGC_IN_SAMPLE                 (0xa03c00)
+#define DBGC_OUT_CTRL                  (0xa03c0c)
 
 /* enable the ID buf for read */
 #define WFPM_PS_CTL_CLR                        0xA0300C
index 1b7d265ffb0acb476228b5b2a10040c09e41d61c..a10c6aae9ab98de7c752b05b9cc33059337b1410 100644 (file)
@@ -307,6 +307,11 @@ enum {
 /* Bit 1-3: LQ command color. Used to match responses to LQ commands */
 #define LQ_FLAG_COLOR_POS               1
 #define LQ_FLAG_COLOR_MSK               (7 << LQ_FLAG_COLOR_POS)
+#define LQ_FLAG_COLOR_GET(_f)          (((_f) & LQ_FLAG_COLOR_MSK) >>\
+                                        LQ_FLAG_COLOR_POS)
+#define LQ_FLAGS_COLOR_INC(_c)         ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
+                                        LQ_FLAG_COLOR_MSK)
+#define LQ_FLAG_COLOR_SET(_f, _c)      ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
 
 /* Bit 4-5: Tx RTS BW Signalling
  * (0) No RTS BW signalling
index 81b98915b1a42e21a318d293c459015688489c7f..1360ebfdc51bc6475c2c68301781b2296bb03369 100644 (file)
@@ -519,8 +519,11 @@ struct agg_tx_status {
  * bit-7 invalid rate indication
  */
 #define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_POS 4
 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70
 #define TX_RES_INV_RATE_INDEX_MSK 0x80
+#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
+                                      TX_RES_RATE_TABLE_COLOR_POS)
 
 #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
 #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
index 7b86a4f1b574c6f507fbde87c1276241fd534a4a..c8712e6eea74187af9c0d8ef3a73622ab7d1d5fd 100644 (file)
@@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
        return 0;
 }
 
-static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
-{
-       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
-               iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
-       else
-               iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
-}
-
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 {
        u8 *ptr;
@@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
        /* EARLY START - firmware's configuration is hard coded */
        if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
             !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
-           conf_id == FW_DBG_START_FROM_ALIVE) {
-               iwl_mvm_restart_early_start(mvm);
+           conf_id == FW_DBG_START_FROM_ALIVE)
                return 0;
-       }
 
        if (!mvm->fw->dbg_conf_tlv[conf_id])
                return -EINVAL;
index 0f1831b419159b606967889ff8ea20c7ef86f0f2..fd2fc46e2fe51d8e8f1930af05e40ee9400c84a6 100644 (file)
@@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
                struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
                struct iwl_mac_beacon_cmd_v7 beacon_cmd;
        } u = {};
-       struct iwl_mac_beacon_cmd beacon_cmd;
+       struct iwl_mac_beacon_cmd beacon_cmd = {};
        struct ieee80211_tx_info *info;
        u32 beacon_skb_len;
        u32 rate, tx_flags;
index 4e74a6b90e70626d6e0e8bb8092796f078c7d55d..52f8d7a6a7dcec95d32089a11c7c9e37b53748a7 100644 (file)
@@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
  */
 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 {
+       u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
+               IWL_MVM_CMD_QUEUE;
+
        return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
-               ~BIT(IWL_MVM_CMD_QUEUE));
+               ~BIT(cmd_queue));
 }
 
 static inline
@@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
        if (!iwl_mvm_has_new_tx_api(mvm))
                iwl_free_fw_paging(mvm);
        mvm->ucode_loaded = false;
+       mvm->fw_dbg_conf = FW_DBG_INVALID;
        iwl_trans_stop_device(mvm->trans);
 }
 
index 9ffff6ed813386418800cf38906b91989c5b6f52..3da5ec40aaead90731224bce3686071ae9dd9344 100644 (file)
@@ -1149,21 +1149,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 
        mutex_lock(&mvm->mutex);
 
-       /* stop recording */
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               /* stop recording */
                iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+               iwl_mvm_fw_error_dump(mvm);
+
+               /* start recording again if the firmware is not crashed */
+               if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+                   mvm->fw->dbg_dest_tlv)
+                       iwl_clear_bits_prph(mvm->trans,
+                                           MON_BUFF_SAMPLE_CTL, 0x100);
        } else {
+               u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
+               u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
+
+               /* stop recording */
                iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
-               /* wait before we collect the data till the DBGC stop */
                udelay(100);
-       }
+               iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+               /* wait before we collect the data till the DBGC stop */
+               udelay(500);
 
-       iwl_mvm_fw_error_dump(mvm);
+               iwl_mvm_fw_error_dump(mvm);
 
-       /* start recording again if the firmware is not crashed */
-       WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
-                    mvm->fw->dbg_dest_tlv &&
-                    iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
+               /* start recording again if the firmware is not crashed */
+               if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+                   mvm->fw->dbg_dest_tlv) {
+                       iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
+                       iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
+               }
+       }
 
        mutex_unlock(&mvm->mutex);
 
index 7788eefcd2bdd3066c0b40d5f5575e44f2102c0e..aa785cf3cf68399eb724b33d24d9168602f791a1 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
                rs_get_lower_rate_in_column(lq_sta, rate);
 }
 
-/* Check if both rates are identical
- * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
- * with a rate indicating STBC/BFER and ANT_AB.
- */
-static inline bool rs_rate_equal(struct rs_rate *a,
-                                struct rs_rate *b,
-                                bool allow_ant_mismatch)
-
-{
-       bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
-               (a->bfer == b->bfer);
-
-       if (allow_ant_mismatch) {
-               if (a->stbc || a->bfer) {
-                       WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
-                                 a->stbc, a->bfer, a->ant);
-                       ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
-               } else if (b->stbc || b->bfer) {
-                       WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
-                                 b->stbc, b->bfer, b->ant);
-                       ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
-               }
-       }
-
-       return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
-               (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
-}
-
 /* Check if both rates share the same column */
 static inline bool rs_rate_column_match(struct rs_rate *a,
                                        struct rs_rate *b)
@@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u32 lq_hwrate;
        struct rs_rate lq_rate, tx_resp_rate;
        struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
-       u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+       u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+       u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+       u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
        u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
-       bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
-                                            IWL_UCODE_TLV_API_LQ_SS_PARAMS);
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
 
        /* Here we actually compare this rate to the latest LQ command */
-       if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
+       if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
                IWL_DEBUG_RATE(mvm,
-                              "initial tx resp rate 0x%x does not match 0x%x\n",
-                              tx_resp_hwrate, lq_hwrate);
+                              "tx resp color 0x%x does not match 0x%x\n",
+                              lq_color, LQ_FLAG_COLOR_GET(table->flags));
 
                /*
                 * Since rates mis-match, the last LQ command may have failed.
@@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
        u8 valid_tx_ant = 0;
        struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
        bool toggle_ant = false;
+       u32 color;
 
        memcpy(&rate, initial_rate, sizeof(rate));
 
@@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
                                 num_rates, num_retries, valid_tx_ant,
                                 toggle_ant);
 
+       /* update the color of the LQ command (as a counter at bits 1-3) */
+       color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
+       lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
 }
 
 struct rs_bfer_active_iter_data {
index ee207f2c0a90c797e84659473f0bd1a455b00923..3abde1cb03034f9068230420072961371a187262 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -357,6 +358,20 @@ struct iwl_lq_sta {
        } pers;
 };
 
+/* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp
+ * Note, it's iwlmvm <-> mac80211 interface.
+ * bits 0-7: reduced tx power
+ * bits 8-10: LQ command's color
+ */
+#define RS_DRV_DATA_TXP_MSK 0xff
+#define RS_DRV_DATA_LQ_COLOR_POS 8
+#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
+                                     RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
+                                 (((uintptr_t)_p) |\
+                                  ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
+
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                          enum nl80211_band band, bool init);
index f5c786ddc52631087b56067807bb4d48e0869664..614d67810d051c539bd093b9452c64dc4e9c8a2f 100644 (file)
@@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (!iwl_mvm_is_dqa_supported(mvm))
                return 0;
 
-       if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+       if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+                   vif->type != NL80211_IFTYPE_ADHOC))
                return -ENOTSUPP;
 
        /*
@@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                mvmvif->cab_queue = queue;
        } else if (!fw_has_api(&mvm->fw->ucode_capa,
                               IWL_UCODE_TLV_API_STA_TYPE)) {
+               /*
+                * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+                * invalid, so make sure we use the queue we want.
+                * Note that this is done here as we want to avoid making DQA
+                * changes in mac80211 layer.
+                */
+               if (vif->type == NL80211_IFTYPE_ADHOC) {
+                       vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+                       mvmvif->cab_queue = vif->cab_queue;
+               }
                iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
                                   &cfg, timeout);
        }
@@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 
        /* Get the station from the mvm local station table */
        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
-       if (!mvm_sta) {
-               IWL_ERR(mvm, "Failed to find station\n");
-               return -EINVAL;
-       }
-       sta_id = mvm_sta->sta_id;
+       if (mvm_sta)
+               sta_id = mvm_sta->sta_id;
 
        IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
                      keyconf->keyidx, sta_id);
 
-       if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+       if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+                       keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+                       keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
                return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
        if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
index 2716cb5483bf5ab9851e7dbf1e5af04b887296e7..ad62b67dceb2836cefe354fd69af60aedc856f8e 100644 (file)
@@ -313,6 +313,7 @@ enum iwl_mvm_agg_state {
  *     This is basically (last acked packet++).
  * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
  *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in tx response.
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
  * @txq_id: Tx queue used by the BA session / DQA
@@ -331,6 +332,7 @@ struct iwl_mvm_tid_data {
        u16 next_reclaimed;
        /* The rest is Tx AGG related */
        u32 rate_n_flags;
+       u8 lq_color;
        bool amsdu_in_ampdu_allowed;
        enum iwl_mvm_agg_state state;
        u16 txq_id;
index f9cbd197246f7ba6ba9e5e0af816cbec54eb9790..506d58104e1cc007ba9d767a16dd77f9df8f0481 100644 (file)
@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
        struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
        int ret;
 
-       if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
-               return -EIO;
-
        mutex_lock(&mvm->mutex);
 
+       if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+               ret = -EIO;
+               goto unlock;
+       }
+
        if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
                ret = -EINVAL;
                goto unlock;
index bcaceb64a6e8c230c5127ee5cc4b790b1f4f1d57..f21901cd4a4fdf75dac1b55d7b9525e84f9e65dd 100644 (file)
@@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        struct iwl_mvm_sta *mvmsta;
        struct sk_buff_head skbs;
        u8 skb_freed = 0;
+       u8 lq_color;
        u16 next_reclaimed, seq_ctl;
        bool is_ndp = false;
 
@@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                info->status.tx_time =
                        le16_to_cpu(tx_resp->wireless_media_time);
                BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+               lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
                info->status.status_driver_data[0] =
-                               (void *)(uintptr_t)tx_resp->reduced_tpc;
+                       RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
 
                ieee80211_tx_status(mvm->hw, skb);
        }
@@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
                        le32_to_cpu(tx_resp->initial_rate);
                mvmsta->tid_data[tid].tx_time =
                        le16_to_cpu(tx_resp->wireless_media_time);
+               mvmsta->tid_data[tid].lq_color =
+                       (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
+                       TX_RES_RATE_TABLE_COLOR_POS;
        }
 
        rcu_read_unlock();
@@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
        iwl_mvm_check_ratid_empty(mvm, sta, tid);
 
        freed = 0;
+
+       /* pack lq color from tid_data along the reduced txp */
+       ba_info->status.status_driver_data[0] =
+               RS_DRV_DATA_PACK(tid_data->lq_color,
+                                ba_info->status.status_driver_data[0]);
        ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
 
        skb_queue_walk(&reclaimed_skbs, skb) {
index 70acf850a9f19f9750296ae2019d8f4a8c277b67..93cbc7a69bcd55d3560529c88b6a127451606cd7 100644 (file)
@@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 {
-       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+           (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
                return iwl_pci_fw_enter_d0i3(trans);
 
        return 0;
@@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 
 static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 {
-       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+           (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
                iwl_pci_fw_exit_d0i3(trans);
 }
 #endif /* CONFIG_PM_SLEEP */
index 9fb46a6f47cf416e55d4416b228be50cdd460e4e..9c9bfbbabdf11ee597dfa7a3614e59d2e49236fe 100644 (file)
@@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
        if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
                ret = -EINVAL;
-               goto error;
+               goto error_free_resp;
        }
 
        rsp = (void *)hcmd.resp_pkt->data;
@@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
        if (qid > ARRAY_SIZE(trans_pcie->txq)) {
                WARN_ONCE(1, "queue index %d unsupported", qid);
                ret = -EIO;
-               goto error;
+               goto error_free_resp;
        }
 
        if (test_and_set_bit(qid, trans_pcie->queue_used)) {
                WARN_ONCE(1, "queue %d already used", qid);
                ret = -EIO;
-               goto error;
+               goto error_free_resp;
        }
 
        txq->id = qid;
@@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                           (txq->write_ptr) | (qid << 16));
        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
 
+       iwl_free_resp(&hcmd);
        return qid;
 
+error_free_resp:
+       iwl_free_resp(&hcmd);
 error:
        iwl_pcie_gen2_txq_free_memory(trans, txq);
        return ret;
index a60926410438b98c2e414de081f7c8093bac5862..903d5813023a93588c08857ff0db1339bbb99c86 100644 (file)
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
 static int nvme_char_major;
 module_param(nvme_char_major, int, 0);
 
-static unsigned long default_ps_max_latency_us = 25000;
+static unsigned long default_ps_max_latency_us = 100000;
 module_param(default_ps_max_latency_us, ulong, 0644);
 MODULE_PARM_DESC(default_ps_max_latency_us,
                 "max power saving latency for new devices; use PM QOS to change per device");
@@ -1342,7 +1342,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
         * transitioning between power states.  Therefore, when running
         * in any given state, we will enter the next lower-power
         * non-operational state after waiting 50 * (enlat + exlat)
-        * microseconds, as long as that state's total latency is under
+        * microseconds, as long as that state's exit latency is under
         * the requested maximum latency.
         *
         * We will not autonomously enter any non-operational state for
@@ -1387,7 +1387,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                 * lowest-power state, not the number of states.
                 */
                for (state = (int)ctrl->npss; state >= 0; state--) {
-                       u64 total_latency_us, transition_ms;
+                       u64 total_latency_us, exit_latency_us, transition_ms;
 
                        if (target)
                                table->entries[state] = target;
@@ -1408,12 +1408,15 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                              NVME_PS_FLAGS_NON_OP_STATE))
                                continue;
 
-                       total_latency_us =
-                               (u64)le32_to_cpu(ctrl->psd[state].entry_lat) +
-                               + le32_to_cpu(ctrl->psd[state].exit_lat);
-                       if (total_latency_us > ctrl->ps_max_latency_us)
+                       exit_latency_us =
+                               (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
+                       if (exit_latency_us > ctrl->ps_max_latency_us)
                                continue;
 
+                       total_latency_us =
+                               exit_latency_us +
+                               le32_to_cpu(ctrl->psd[state].entry_lat);
+
                        /*
                         * This state is good.  Use it as the APST idle
                         * target for higher power states.
@@ -2438,6 +2441,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
        struct nvme_ns *ns;
 
        mutex_lock(&ctrl->namespaces_mutex);
+
+       /* Forcibly start all queues to avoid having stuck requests */
+       blk_mq_start_hw_queues(ctrl->admin_q);
+
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                /*
                 * Revalidating a dead namespace sets capacity to 0. This will
index 5b14cbefb7240d5e7d50bb1ade8fd958417282e8..92964cef0f4be5795bed3e874407c74a3e3cc725 100644 (file)
@@ -1139,6 +1139,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 /* *********************** NVME Ctrl Routines **************************** */
 
 static void __nvme_fc_final_op_cleanup(struct request *rq);
+static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
 nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1265,7 +1266,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
        struct nvme_command *sqe = &op->cmd_iu.sqe;
        __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
        union nvme_result result;
-       bool complete_rq;
+       bool complete_rq, terminate_assoc = true;
 
        /*
         * WARNING:
@@ -1294,6 +1295,14 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
         * fabricate a CQE, the following fields will not be set as they
         * are not referenced:
         *      cqe.sqid,  cqe.sqhd,  cqe.command_id
+        *
+        * Failure or error of an individual i/o, in a transport
+        * detected fashion unrelated to the nvme completion status,
+        * potentially cause the initiator and target sides to get out
+        * of sync on SQ head/tail (aka outstanding io count allowed).
+        * Per FC-NVME spec, failure of an individual command requires
+        * the connection to be terminated, which in turn requires the
+        * association to be terminated.
         */
 
        fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
@@ -1359,6 +1368,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
                goto done;
        }
 
+       terminate_assoc = false;
+
 done:
        if (op->flags & FCOP_FLAGS_AEN) {
                nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
@@ -1366,7 +1377,7 @@ done:
                atomic_set(&op->state, FCPOP_STATE_IDLE);
                op->flags = FCOP_FLAGS_AEN;     /* clear other flags */
                nvme_fc_ctrl_put(ctrl);
-               return;
+               goto check_error;
        }
 
        complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
@@ -1379,6 +1390,10 @@ done:
                nvme_end_request(rq, status, result);
        } else
                __nvme_fc_final_op_cleanup(rq);
+
+check_error:
+       if (terminate_assoc)
+               nvme_fc_error_recovery(ctrl, "transport detected io error");
 }
 
 static int
@@ -2791,6 +2806,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                ctrl->ctrl.opts = NULL;
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
+               nvme_put_ctrl(&ctrl->ctrl);
 
                /* as we're past the point where we transition to the ref
                 * counting teardown path, if we return a bad pointer here,
index d52701df72457d0fa2b85a168c500fd022b8b717..951042a375d6b22dbd34988e38fef7114593c366 100644 (file)
@@ -1367,7 +1367,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
        bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
 
        /* If there is a reset ongoing, we shouldn't reset again. */
-       if (work_busy(&dev->reset_work))
+       if (dev->ctrl.state == NVME_CTRL_RESETTING)
                return false;
 
        /* We shouldn't reset unless the controller is on fatal error state
@@ -1903,7 +1903,7 @@ static void nvme_reset_work(struct work_struct *work)
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result = -ENODEV;
 
-       if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
+       if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
                goto out;
 
        /*
@@ -1913,9 +1913,6 @@ static void nvme_reset_work(struct work_struct *work)
        if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
                nvme_dev_disable(dev, false);
 
-       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
-               goto out;
-
        result = nvme_pci_enable(dev);
        if (result)
                goto out;
@@ -2009,8 +2006,8 @@ static int nvme_reset(struct nvme_dev *dev)
 {
        if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
                return -ENODEV;
-       if (work_busy(&dev->reset_work))
-               return -ENODEV;
+       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+               return -EBUSY;
        if (!queue_work(nvme_workq, &dev->reset_work))
                return -EBUSY;
        return 0;
@@ -2136,6 +2133,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto release_pools;
 
+       nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
        queue_work(nvme_workq, &dev->reset_work);
@@ -2179,6 +2177,7 @@ static void nvme_remove(struct pci_dev *pdev)
 
        nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
+       cancel_work_sync(&dev->reset_work);
        pci_set_drvdata(pdev, NULL);
 
        if (!pci_device_is_present(pdev)) {
index 28bd255c144dcca10aa60cede2c9a51cd101426a..24397d306d532213cf66e1ca0de9aa43bf12d3d5 100644 (file)
@@ -753,28 +753,26 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        if (ret)
                goto requeue;
 
-       blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
-
        ret = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (ret)
-               goto stop_admin_q;
+               goto requeue;
 
        set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
 
        ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (ret)
-               goto stop_admin_q;
+               goto requeue;
 
        nvme_start_keep_alive(&ctrl->ctrl);
 
        if (ctrl->queue_count > 1) {
                ret = nvme_rdma_init_io_queues(ctrl);
                if (ret)
-                       goto stop_admin_q;
+                       goto requeue;
 
                ret = nvme_rdma_connect_io_queues(ctrl);
                if (ret)
-                       goto stop_admin_q;
+                       goto requeue;
        }
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -782,7 +780,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        ctrl->ctrl.opts->nr_reconnects = 0;
 
        if (ctrl->queue_count > 1) {
-               nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
                nvme_queue_async_events(&ctrl->ctrl);
        }
@@ -791,8 +788,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
        return;
 
-stop_admin_q:
-       blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
 requeue:
        dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
                        ctrl->ctrl.opts->nr_reconnects);
@@ -823,6 +818,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
 
+       /*
+        * queues are not a live anymore, so restart the queues to fail fast
+        * new IO
+        */
+       blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+       nvme_start_queues(&ctrl->ctrl);
+
        nvme_rdma_reconnect_or_remove(ctrl);
 }
 
@@ -1433,7 +1435,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 /*
  * We cannot accept any other command until the Connect command has completed.
  */
-static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
+static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
                struct request *rq)
 {
        if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
@@ -1441,11 +1443,22 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
 
                if (!blk_rq_is_passthrough(rq) ||
                    cmd->common.opcode != nvme_fabrics_command ||
-                   cmd->fabrics.fctype != nvme_fabrics_type_connect)
-                       return false;
+                   cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+                       /*
+                        * reconnecting state means transport disruption, which
+                        * can take a long time and even might fail permanently,
+                        * so we can't let incoming I/O be requeued forever.
+                        * fail it fast to allow upper layers a chance to
+                        * failover.
+                        */
+                       if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+                               return -EIO;
+                       else
+                               return -EAGAIN;
+               }
        }
 
-       return true;
+       return 0;
 }
 
 static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1463,8 +1476,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        WARN_ON_ONCE(rq->tag < 0);
 
-       if (!nvme_rdma_queue_is_ready(queue, rq))
-               return BLK_MQ_RQ_QUEUE_BUSY;
+       ret = nvme_rdma_queue_is_ready(queue, rq);
+       if (unlikely(ret))
+               goto err;
 
        dev = queue->device->dev;
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
index 34c862f213c7e85554830b741cdf051f1fbbd86d..0a9b78705ee810c9e18c6fa1f46551fc27374287 100644 (file)
@@ -29,6 +29,17 @@ static int arm_pmu_acpi_register_irq(int cpu)
                return -EINVAL;
 
        gsi = gicc->performance_interrupt;
+
+       /*
+        * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
+        * have an interrupt. QEMU advertises this by using a GSI of zero,
+        * which is not known to be valid on any hardware despite being
+        * valid per the spec. Take the pragmatic approach and reject a
+        * GSI of zero for now.
+        */
+       if (!gsi)
+               return 0;
+
        if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
                trigger = ACPI_EDGE_SENSITIVE;
        else
index 1653cbda6a8299b33b5cebae92bd4710e41412a4..bd459a93b0e7e9b11c999dd4bf9b95c3500be3e2 100644 (file)
@@ -680,30 +680,16 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group);
  * pinctrl_generic_free_groups() - removes all pin groups
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl groups
+ * are allocated with devm_kzalloc() so no need to free them here.
  */
 static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
 {
        struct radix_tree_iter iter;
-       struct group_desc *group;
-       unsigned long *indices;
        void **slot;
-       int i = 0;
-
-       indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-                              pctldev->num_groups, GFP_KERNEL);
-       if (!indices)
-               return;
 
        radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
-               indices[i++] = iter.index;
-
-       for (i = 0; i < pctldev->num_groups; i++) {
-               group = radix_tree_lookup(&pctldev->pin_group_tree,
-                                         indices[i]);
-               radix_tree_delete(&pctldev->pin_group_tree, indices[i]);
-               devm_kfree(pctldev->dev, group);
-       }
+               radix_tree_delete(&pctldev->pin_group_tree, iter.index);
 
        pctldev->num_groups = 0;
 }
index 41b5b07d5a2bf51f6b0623597c294862910de78c..6852010a6d708b5010555cbb141bf57ac31de077 100644 (file)
@@ -194,6 +194,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
        return 0;
 }
 
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp &= ~(mask << shift);
+       tmp |= value << shift;
+       writel(tmp, reg);
+}
+
 static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
                               unsigned group)
 {
@@ -211,8 +221,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
                reg += bank * 0x20 + pin / 16 * 0x10;
                shift = pin % 16 * 2;
 
-               writel(0x3 << shift, reg + CLR);
-               writel(g->muxsel[i] << shift, reg + SET);
+               mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
        }
 
        return 0;
@@ -279,8 +288,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
                        /* mA */
                        if (config & MA_PRESENT) {
                                shift = pin % 8 * 4;
-                               writel(0x3 << shift, reg + CLR);
-                               writel(ma << shift, reg + SET);
+                               mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
                        }
 
                        /* vol */
index 2debba62fac90d956ce37cd09805c518ee4a8da5..20f1b44939944614ff270c757fc7152f901e9f09 100644 (file)
@@ -1539,15 +1539,29 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
  * is not listed below.
  */
 static const struct dmi_system_id chv_no_valid_mask[] = {
+       /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
        {
-               /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
-               .ident = "Acer Chromebook (CYAN)",
+               .ident = "Intel_Strago based Chromebooks (All models)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
-                       DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
                },
-       }
+       },
+       {
+               .ident = "Acer Chromebook R11 (Cyan)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
+               },
+       },
+       {
+               .ident = "Samsung Chromebook 3 (Celes)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+               },
+       },
+       {}
 };
 
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
index 0d6b7f4b82af34a2a85e51c924ca8420bd7a6268..720a19fd38d2c6c24d30e5e1c7dcdf70cae0e590 100644 (file)
@@ -35,7 +35,6 @@ static const struct pin_config_item conf_items[] = {
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
                                "input bias pull to pin specific state", NULL, false),
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
-       PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
@@ -161,7 +160,6 @@ static const struct pinconf_generic_params dt_params[] = {
        { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 },
        { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 },
        { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
-       { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 },
        { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 },
        { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 },
        { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 },
@@ -174,7 +172,6 @@ static const struct pinconf_generic_params dt_params[] = {
        { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
        { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
        { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
-       { "output-enable", PIN_CONFIG_OUTPUT, 1, },
        { "output-high", PIN_CONFIG_OUTPUT, 1, },
        { "output-low", PIN_CONFIG_OUTPUT, 0, },
        { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
index 9fd6d9087dc508ca7731d7f1e868988e0e320cc2..16b3ae5e4f440c4769db55ebf7c61ebae7e1e5c1 100644 (file)
@@ -826,30 +826,17 @@ EXPORT_SYMBOL_GPL(pinmux_generic_remove_function);
  * pinmux_generic_free_functions() - removes all functions
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl
+ * functions are allocated with devm_kzalloc() so no need to free
+ * them here.
  */
 void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
 {
        struct radix_tree_iter iter;
-       struct function_desc *function;
-       unsigned long *indices;
        void **slot;
-       int i = 0;
-
-       indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-                              pctldev->num_functions, GFP_KERNEL);
-       if (!indices)
-               return;
 
        radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
-               indices[i++] = iter.index;
-
-       for (i = 0; i < pctldev->num_functions; i++) {
-               function = radix_tree_lookup(&pctldev->pin_function_tree,
-                                            indices[i]);
-               radix_tree_delete(&pctldev->pin_function_tree, indices[i]);
-               devm_kfree(pctldev->dev, function);
-       }
+               radix_tree_delete(&pctldev->pin_function_tree, iter.index);
 
        pctldev->num_functions = 0;
 }
index 9aec1d2232dd830e2c19a8e1e394e6033e621c21..6624499eae72f5c2ba986c8c54c6f7e583f05f2a 100644 (file)
@@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x3, "owa")),          /* DOUT */
+                 SUNXI_FUNCTION(0x3, "spdif")),        /* DOUT */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out")),
index 35ce53edabf90009efcd228e71a109e410337708..d5e5229308f2291136ebe7c6061346507a733907 100644 (file)
@@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void)
 }
 
 postcore_initcall(hi6220_reset_init);
+
+MODULE_LICENSE("GPL v2");
index bd7d39ecbd2470246a58a8ed3a3fd11367df814f..fb06974c88c15c2b23864e44779e7d61826546bf 100644 (file)
@@ -1873,6 +1873,11 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
        tcp_task->dd_data = tdata;
        task->hdr = NULL;
 
+       if (tdata->skb) {
+               kfree_skb(tdata->skb);
+               tdata->skb = NULL;
+       }
+
        if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
            (opcode == ISCSI_OP_SCSI_DATA_OUT ||
             (opcode == ISCSI_OP_SCSI_CMD &&
@@ -1890,6 +1895,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
                return -ENOMEM;
        }
 
+       skb_get(tdata->skb);
        skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
        task->hdr = (struct iscsi_hdr *)tdata->skb->data;
        task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
@@ -2035,9 +2041,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
        unsigned int datalen;
        int err;
 
-       if (!skb) {
+       if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) {
                log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
-                       "task 0x%p, skb NULL.\n", task);
+                       "task 0x%p, skb 0x%p\n", task, skb);
                return 0;
        }
 
@@ -2050,7 +2056,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
        }
 
        datalen = skb->data_len;
-       tdata->skb = NULL;
 
        /* write ppod first if using ofldq to write ppod */
        if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
@@ -2078,6 +2083,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
                        pdulen += ISCSI_DIGEST_SIZE;
 
                task->conn->txdata_octets += pdulen;
+               cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
                return 0;
        }
 
@@ -2086,7 +2092,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
                        "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
                        task, skb, skb->len, skb->data_len, err);
                /* reset skb to send when we are called again */
-               tdata->skb = skb;
                return err;
        }
 
@@ -2094,7 +2099,8 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
                "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
                task->itt, skb, skb->len, skb->data_len, err);
 
-       kfree_skb(skb);
+       __kfree_skb(tdata->skb);
+       tdata->skb = NULL;
 
        iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
        iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
@@ -2113,8 +2119,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
 
        tcp_task->dd_data = NULL;
        /*  never reached the xmit task callout */
-       if (tdata->skb)
-               __kfree_skb(tdata->skb);
+       if (tdata->skb) {
+               kfree_skb(tdata->skb);
+               tdata->skb = NULL;
+       }
 
        task_release_itt(task, task->hdr_itt);
        memset(tdata, 0, sizeof(*tdata));
@@ -2714,6 +2722,9 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
 static int __init libcxgbi_init_module(void)
 {
        pr_info("%s", version);
+
+       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+                    sizeof(struct cxgbi_skb_cb));
        return 0;
 }
 
index 18e0ea83d36172cf2fcc55ecfe8f3b133913f1d5..239462a7576051dca167ad246ba34f07777209cf 100644 (file)
@@ -195,7 +195,8 @@ struct cxgbi_skb_rx_cb {
 };
 
 struct cxgbi_skb_tx_cb {
-       void *l2t;
+       void *handle;
+       void *arp_err_handler;
        struct sk_buff *wr_next;
 };
 
@@ -203,6 +204,7 @@ enum cxgbi_skcb_flags {
        SKCBF_TX_NEED_HDR,      /* packet needs a header */
        SKCBF_TX_MEM_WRITE,     /* memory write */
        SKCBF_TX_FLAG_COMPL,    /* wr completion flag */
+       SKCBF_TX_DONE,          /* skb tx done */
        SKCBF_RX_COALESCED,     /* received whole pdu */
        SKCBF_RX_HDR,           /* received pdu header */
        SKCBF_RX_DATA,          /* received pdu payload */
@@ -215,13 +217,13 @@ enum cxgbi_skcb_flags {
 };
 
 struct cxgbi_skb_cb {
-       unsigned char ulp_mode;
-       unsigned long flags;
-       unsigned int seq;
        union {
                struct cxgbi_skb_rx_cb rx;
                struct cxgbi_skb_tx_cb tx;
        };
+       unsigned char ulp_mode;
+       unsigned long flags;
+       unsigned int seq;
 };
 
 #define CXGBI_SKB_CB(skb)      ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
@@ -374,11 +376,9 @@ static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
        cxgbi_skcb_tx_wr_next(skb) = NULL;
        /*
         * We want to take an extra reference since both us and the driver
-        * need to free the packet before it's really freed. We know there's
-        * just one user currently so we use atomic_set rather than skb_get
-        * to avoid the atomic op.
+        * need to free the packet before it's really freed.
         */
-       atomic_set(&skb->users, 2);
+       skb_get(skb);
 
        if (!csk->wr_pending_head)
                csk->wr_pending_head = skb;
index 3cbab8710e58133ead0cb173cb60ea7a4cafc8e2..2ceff585f1896d9762fd288a93f4bcfde0bf2623 100644 (file)
@@ -265,18 +265,16 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
                                      struct list_head *list,
                                      unsigned char *cdb)
 {
-       struct scsi_device *sdev = ctlr->ms_sdev;
-       struct rdac_dh_data *h = sdev->handler_data;
        struct rdac_mode_common *common;
        unsigned data_size;
        struct rdac_queue_data *qdata;
        u8 *lun_table;
 
-       if (h->ctlr->use_ms10) {
+       if (ctlr->use_ms10) {
                struct rdac_pg_expanded *rdac_pg;
 
                data_size = sizeof(struct rdac_pg_expanded);
-               rdac_pg = &h->ctlr->mode_select.expanded;
+               rdac_pg = &ctlr->mode_select.expanded;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
@@ -288,7 +286,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
                struct rdac_pg_legacy *rdac_pg;
 
                data_size = sizeof(struct rdac_pg_legacy);
-               rdac_pg = &h->ctlr->mode_select.legacy;
+               rdac_pg = &ctlr->mode_select.legacy;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
@@ -304,7 +302,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
        }
 
        /* Prepare the command. */
-       if (h->ctlr->use_ms10) {
+       if (ctlr->use_ms10) {
                cdb[0] = MODE_SELECT_10;
                cdb[7] = data_size >> 8;
                cdb[8] = data_size & 0xff;
index d390325c99ecf9487c9b4441fd0e25aec05378c7..abf6026645dd2308fba55179ca750b0e786da5fa 100644 (file)
@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
                cmd = list_first_entry_or_null(&vscsi->free_cmd,
                                               struct ibmvscsis_cmd, list);
                if (cmd) {
+                       if (cmd->abort_cmd)
+                               cmd->abort_cmd = NULL;
                        cmd->flags &= ~(DELAY_SEND);
                        list_del(&cmd->list);
                        cmd->iue = iue;
@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
                                if (cmd->abort_cmd) {
                                        retry = true;
                                        cmd->abort_cmd->flags &= ~(DELAY_SEND);
+                                       cmd->abort_cmd = NULL;
                                }
 
                                /*
@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
                                        list_del(&cmd->list);
                                        ibmvscsis_free_cmd_resources(vscsi,
                                                                     cmd);
+                                       /*
+                                        * With a successfully aborted op
+                                        * through LIO we want to increment the
+                                        * the vscsi credit so that when we dont
+                                        * send a rsp to the original scsi abort
+                                        * op (h_send_crq), but the tm rsp to
+                                        * the abort is sent, the credit is
+                                        * correctly sent with the abort tm rsp.
+                                        * We would need 1 for the abort tm rsp
+                                        * and 1 credit for the aborted scsi op.
+                                        * Thus we need to increment here.
+                                        * Also we want to increment the credit
+                                        * here because we want to make sure
+                                        * cmd is actually released first
+                                        * otherwise the client will think it
+                                        * it can send a new cmd, and we could
+                                        * find ourselves short of cmd elements.
+                                        */
+                                       vscsi->credit += 1;
                                } else {
                                        iue = cmd->iue;
 
@@ -2962,10 +2984,7 @@ static long srp_build_response(struct scsi_info *vscsi,
 
        rsp->opcode = SRP_RSP;
 
-       if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
-               rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
-       else
-               rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+       rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
        rsp->tag = cmd->rsp.tag;
        rsp->flags = 0;
 
index 5ca3e8c28a3f6af3b4faf1d82ec1ab6f37480127..32632c9b22766d59e223a256c430810a4fa33bec 100644 (file)
@@ -38,7 +38,7 @@ struct qedi_endpoint;
 #define QEDI_MAX_ISCSI_TASK            4096
 #define QEDI_MAX_TASK_NUM              0x0FFF
 #define QEDI_MAX_ISCSI_CONNS_PER_HBA   1024
-#define QEDI_ISCSI_MAX_BDS_PER_CMD     256     /* Firmware max BDs is 256 */
+#define QEDI_ISCSI_MAX_BDS_PER_CMD     255     /* Firmware max BDs is 255 */
 #define MAX_OUSTANDING_TASKS_PER_CON   1024
 
 #define QEDI_MAX_BD_LEN                0xffff
@@ -63,6 +63,7 @@ struct qedi_endpoint;
 #define QEDI_PAGE_MASK         (~((QEDI_PAGE_SIZE) - 1))
 
 #define QEDI_PAGE_SIZE         4096
+#define QEDI_HW_DMA_BOUNDARY   0xfff
 #define QEDI_PATH_HANDLE       0xFE0000000UL
 
 struct qedi_uio_ctrl {
index d6978cbc56f0586aa8a075191433184c50c93b01..8bc7ee1a8ca81626829329e80831ffa2d57b8c63 100644 (file)
@@ -1494,6 +1494,8 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
        tmf_hdr = (struct iscsi_tm *)mtask->hdr;
        qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
        ep = qedi_conn->ep;
+       if (!ep)
+               return -ENODEV;
 
        tid = qedi_get_task_idx(qedi);
        if (tid == -1)
index 3548d46f9b275825a9f76765966cf8c943762e88..87f0af358b33ae4563ba2ee83a1aaa4ce9ca7945 100644 (file)
@@ -59,6 +59,7 @@ struct scsi_host_template qedi_host_template = {
        .this_id = -1,
        .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
        .max_sectors = 0xffff,
+       .dma_boundary = QEDI_HW_DMA_BOUNDARY,
        .cmd_per_lun = 128,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = qedi_shost_attrs,
@@ -1223,8 +1224,12 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 
        iscsi_cid = (u32)path_data->handle;
        qedi_ep = qedi->ep_tbl[iscsi_cid];
-       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+       if (!qedi_ep) {
+               ret = -EINVAL;
+               goto set_path_exit;
+       }
 
        if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
                QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
index 92775a8b74b1cdc068b8d8808b7bd9ccd5b212eb..09a294634bc7e8898a2d209a9a5cef3d50eb8f32 100644 (file)
@@ -151,6 +151,11 @@ static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
 
 static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
 {
+       if (udev->uctrl) {
+               free_page((unsigned long)udev->uctrl);
+               udev->uctrl = NULL;
+       }
+
        if (udev->ll2_ring) {
                free_page((unsigned long)udev->ll2_ring);
                udev->ll2_ring = NULL;
@@ -169,7 +174,6 @@ static void __qedi_free_uio(struct qedi_uio_dev *udev)
        __qedi_free_uio_rings(udev);
 
        pci_dev_put(udev->pdev);
-       kfree(udev->uctrl);
        kfree(udev);
 }
 
@@ -208,6 +212,11 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
        if (udev->ll2_ring || udev->ll2_buf)
                return rc;
 
+       /* Memory for control area.  */
+       udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!udev->uctrl)
+               return -ENOMEM;
+
        /* Allocating memory for LL2 ring  */
        udev->ll2_ring_size = QEDI_PAGE_SIZE;
        udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
@@ -237,7 +246,6 @@ exit_alloc_ring:
 static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
 {
        struct qedi_uio_dev *udev = NULL;
-       struct qedi_uio_ctrl *uctrl = NULL;
        int rc = 0;
 
        list_for_each_entry(udev, &qedi_udev_list, list) {
@@ -258,21 +266,14 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
                goto err_udev;
        }
 
-       uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
-       if (!uctrl) {
-               rc = -ENOMEM;
-               goto err_uctrl;
-       }
-
        udev->uio_dev = -1;
 
        udev->qedi = qedi;
        udev->pdev = qedi->pdev;
-       udev->uctrl = uctrl;
 
        rc = __qedi_alloc_uio_rings(udev);
        if (rc)
-               goto err_uio_rings;
+               goto err_uctrl;
 
        list_add(&udev->list, &qedi_udev_list);
 
@@ -283,8 +284,6 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
        udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
        return 0;
 
- err_uio_rings:
-       kfree(uctrl);
  err_uctrl:
        kfree(udev);
  err_udev:
@@ -828,6 +827,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
        qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
        qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
        qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+       qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
+       qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
 
        for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
                if ((1 << log_page_size) == PAGE_SIZE)
index 8ea01904c0eae72b9273919c0214510072e89813..466517c7c8e618112dcdea3716631be9f277d400 100644 (file)
@@ -19,5 +19,3 @@ obj-$(CONFIG_VIDEO_AP1302)     += ap1302.o
 
 obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
 
-ccflags-y += -Werror
-
index 1d7f7ab94cac3b7ebf454b16a890dab9dc255314..6b13a3a66e49e3ee064887fa324685ddf05d1a0c 100644 (file)
@@ -4,5 +4,3 @@ imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o o
 
 ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
 obj-$(CONFIG_VIDEO_OV8858)     += ov8858_driver.o
-
-ccflags-y += -Werror
index fceb9e9b881bac608e81ae76ed50dad139e2432e..c9c0e1245858470147768987c12de5727e68ad22 100644 (file)
@@ -1,3 +1 @@
 obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
-
-ccflags-y += -Werror
index 3fa7c1c1479f330367b7ab01bf41d8c502a69349..f126a89a08e93ff6511b603960285bfbe3a3c4f7 100644 (file)
@@ -351,5 +351,5 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
 DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
 DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
 
-ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror
+ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
 
index 26a9bcd5ee6a40c391195ef13e205d8d936c8a54..0d8f81591bed076fa1f89f7cd27360776488f349 100644 (file)
@@ -3790,6 +3790,8 @@ int iscsi_target_tx_thread(void *arg)
 {
        int ret = 0;
        struct iscsi_conn *conn = arg;
+       bool conn_freed = false;
+
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
         * connection recovery / failure event can be triggered externally.
@@ -3815,12 +3817,14 @@ get_immediate:
                        goto transport_err;
 
                ret = iscsit_handle_response_queue(conn);
-               if (ret == 1)
+               if (ret == 1) {
                        goto get_immediate;
-               else if (ret == -ECONNRESET)
+               } else if (ret == -ECONNRESET) {
+                       conn_freed = true;
                        goto out;
-               else if (ret < 0)
+               } else if (ret < 0) {
                        goto transport_err;
+               }
        }
 
 transport_err:
@@ -3830,8 +3834,13 @@ transport_err:
         * responsible for cleaning up the early connection failure.
         */
        if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-               iscsit_take_action_for_connection_exit(conn);
+               iscsit_take_action_for_connection_exit(conn, &conn_freed);
 out:
+       if (!conn_freed) {
+               while (!kthread_should_stop()) {
+                       msleep(100);
+               }
+       }
        return 0;
 }
 
@@ -4004,6 +4013,7 @@ int iscsi_target_rx_thread(void *arg)
 {
        int rc;
        struct iscsi_conn *conn = arg;
+       bool conn_freed = false;
 
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
@@ -4016,7 +4026,7 @@ int iscsi_target_rx_thread(void *arg)
         */
        rc = wait_for_completion_interruptible(&conn->rx_login_comp);
        if (rc < 0 || iscsi_target_check_conn_state(conn))
-               return 0;
+               goto out;
 
        if (!conn->conn_transport->iscsit_get_rx_pdu)
                return 0;
@@ -4025,7 +4035,15 @@ int iscsi_target_rx_thread(void *arg)
 
        if (!signal_pending(current))
                atomic_set(&conn->transport_failed, 1);
-       iscsit_take_action_for_connection_exit(conn);
+       iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
+out:
+       if (!conn_freed) {
+               while (!kthread_should_stop()) {
+                       msleep(100);
+               }
+       }
+
        return 0;
 }
 
index 9a96e17bf7cd5f7448c880ffafcaa123730ebe71..7fe2aa73cff69e04f8df8d79e3af1c634fb5ca04 100644 (file)
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
        }
 }
 
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
 {
+       *conn_freed = false;
+
        spin_lock_bh(&conn->state_lock);
        if (atomic_read(&conn->connection_exit)) {
                spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
        if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
                spin_unlock_bh(&conn->state_lock);
                iscsit_close_connection(conn);
+               *conn_freed = true;
                return;
        }
 
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
        spin_unlock_bh(&conn->state_lock);
 
        iscsit_handle_connection_cleanup(conn);
+       *conn_freed = true;
 }
index 60e69e2af6eda981efb74e4ac313fb0d031093bd..3822d9cd12302071467af03d4920fda601fdd351 100644 (file)
@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
 
 #endif   /*** ISCSI_TARGET_ERL0_H ***/
index 66238477137bc46d35cade3167451e19f2d401ce..92b96b51d5068e77c45d85a5a4d16efc9ffa5a93 100644 (file)
@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg)
                        break;
        }
 
+       while (!kthread_should_stop()) {
+               msleep(100);
+       }
+
        return 0;
 }
index 7ccc9c1cbfd1a664fb4c37a5dd71f305e735f4bb..6f88b31242b0562b297e60fdf61552719ed7a97c 100644 (file)
@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
 
 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
 
-static bool iscsi_target_sk_state_check(struct sock *sk)
+static bool __iscsi_target_sk_check_close(struct sock *sk)
 {
        if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
-               pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+               pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
                        "returning FALSE\n");
-               return false;
+               return true;
        }
-       return true;
+       return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               read_lock_bh(&sk->sk_callback_lock);
+               state = (__iscsi_target_sk_check_close(sk) ||
+                        test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+               read_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               read_lock_bh(&sk->sk_callback_lock);
+               state = test_bit(flag, &conn->login_flags);
+               read_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               write_lock_bh(&sk->sk_callback_lock);
+               state = (__iscsi_target_sk_check_close(sk) ||
+                        test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+               if (!state)
+                       clear_bit(flag, &conn->login_flags);
+               write_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
 }
 
 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
        pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
                        conn, current->comm, current->pid);
+       /*
+        * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+        * before initial PDU processing in iscsi_target_start_negotiation()
+        * has completed, go ahead and retry until it's cleared.
+        *
+        * Otherwise if the TCP connection drops while this is occuring,
+        * iscsi_target_start_negotiation() will detect the failure, call
+        * cancel_delayed_work_sync(&conn->login_work), and cleanup the
+        * remaining iscsi connection resources from iscsi_np process context.
+        */
+       if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+               schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+               return;
+       }
 
        spin_lock(&tpg->tpg_state_lock);
        state = (tpg->tpg_state == TPG_STATE_ACTIVE);
@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
        if (!state) {
                pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
-               return;
+               goto err;
        }
 
-       if (conn->sock) {
-               struct sock *sk = conn->sock->sk;
-
-               read_lock_bh(&sk->sk_callback_lock);
-               state = iscsi_target_sk_state_check(sk);
-               read_unlock_bh(&sk->sk_callback_lock);
-
-               if (!state) {
-                       pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
-                       iscsi_target_restore_sock_callbacks(conn);
-                       iscsi_target_login_drop(conn, login);
-                       iscsit_deaccess_np(np, tpg, tpg_np);
-                       return;
-               }
+       if (iscsi_target_sk_check_close(conn)) {
+               pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+               goto err;
        }
 
        conn->login_kworker = current;
@@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
        flush_signals(current);
        conn->login_kworker = NULL;
 
-       if (rc < 0) {
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
-               return;
-       }
+       if (rc < 0)
+               goto err;
 
        pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
                        conn, current->comm, current->pid);
 
        rc = iscsi_target_do_login(conn, login);
        if (rc < 0) {
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
+               goto err;
        } else if (!rc) {
-               if (conn->sock) {
-                       struct sock *sk = conn->sock->sk;
-
-                       write_lock_bh(&sk->sk_callback_lock);
-                       clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
-                       write_unlock_bh(&sk->sk_callback_lock);
-               }
+               if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+                       goto err;
        } else if (rc == 1) {
                iscsi_target_nego_release(conn);
                iscsi_post_login_handler(np, conn, zero_tsih);
                iscsit_deaccess_np(np, tpg, tpg_np);
        }
+       return;
+
+err:
+       iscsi_target_restore_sock_callbacks(conn);
+       iscsi_target_login_drop(conn, login);
+       iscsit_deaccess_np(np, tpg, tpg_np);
 }
 
 static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
                orig_state_change(sk);
                return;
        }
+       state = __iscsi_target_sk_check_close(sk);
+       pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
+
        if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
                pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
                         " conn: %p\n", conn);
+               if (state)
+                       set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
                write_unlock_bh(&sk->sk_callback_lock);
                orig_state_change(sk);
                return;
        }
-       if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+       if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
                pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
                         conn);
                write_unlock_bh(&sk->sk_callback_lock);
                orig_state_change(sk);
                return;
        }
+       /*
+        * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+        * but only queue conn->login_work -> iscsi_target_do_login_rx()
+        * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+        *
+        * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+        * will detect the dropped TCP connection from delayed workqueue context.
+        *
+        * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+        * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+        * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+        * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+        * dropped TCP connection in iscsi_np process context, and cleaning up
+        * the remaining iscsi connection resources.
+        */
+       if (state) {
+               pr_debug("iscsi_target_sk_state_change got failed state\n");
+               set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+               state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+               write_unlock_bh(&sk->sk_callback_lock);
 
-       state = iscsi_target_sk_state_check(sk);
-       write_unlock_bh(&sk->sk_callback_lock);
-
-       pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+               orig_state_change(sk);
 
-       if (!state) {
-               pr_debug("iscsi_target_sk_state_change got failed state\n");
-               schedule_delayed_work(&conn->login_cleanup_work, 0);
+               if (!state)
+                       schedule_delayed_work(&conn->login_work, 0);
                return;
        }
+       write_unlock_bh(&sk->sk_callback_lock);
+
        orig_state_change(sk);
 }
 
@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
                        if (iscsi_target_handle_csg_one(conn, login) < 0)
                                return -1;
                        if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+                               /*
+                                * Check to make sure the TCP connection has not
+                                * dropped asynchronously while session reinstatement
+                                * was occuring in this kthread context, before
+                                * transitioning to full feature phase operation.
+                                */
+                               if (iscsi_target_sk_check_close(conn))
+                                       return -1;
+
                                login->tsih = conn->sess->tsih;
                                login->login_complete = 1;
                                iscsi_target_restore_sock_callbacks(conn);
@@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
                break;
        }
 
-       if (conn->sock) {
-               struct sock *sk = conn->sock->sk;
-               bool state;
-
-               read_lock_bh(&sk->sk_callback_lock);
-               state = iscsi_target_sk_state_check(sk);
-               read_unlock_bh(&sk->sk_callback_lock);
-
-               if (!state) {
-                       pr_debug("iscsi_target_do_login() failed state for"
-                                " conn: %p\n", conn);
-                       return -1;
-               }
-       }
-
        return 0;
 }
 
@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation(
 
                write_lock_bh(&sk->sk_callback_lock);
                set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+               set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
                write_unlock_bh(&sk->sk_callback_lock);
        }
-
+       /*
+        * If iscsi_target_do_login returns zero to signal more PDU
+        * exchanges are required to complete the login, go ahead and
+        * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+        * is still active.
+        *
+        * Otherwise if TCP connection dropped asynchronously, go ahead
+        * and perform connection cleanup now.
+        */
        ret = iscsi_target_do_login(conn, login);
+       if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+               ret = -1;
+
        if (ret < 0) {
                cancel_delayed_work_sync(&conn->login_work);
                cancel_delayed_work_sync(&conn->login_cleanup_work);
index 37f57357d4a0827f5669cb89d1619c4651192547..6025935036c976edeeee0d7a91df79a66aa84a2b 100644 (file)
@@ -1160,15 +1160,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
        if (cmd->unknown_data_length) {
                cmd->data_length = size;
        } else if (size != cmd->data_length) {
-               pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+               pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
                        " %u does not match SCSI CDB Length: %u for SAM Opcode:"
                        " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
                                cmd->data_length, size, cmd->t_task_cdb[0]);
 
-               if (cmd->data_direction == DMA_TO_DEVICE &&
-                   cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
-                       pr_err("Rejecting underflow/overflow WRITE data\n");
-                       return TCM_INVALID_CDB_FIELD;
+               if (cmd->data_direction == DMA_TO_DEVICE) {
+                       if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+                               pr_err_ratelimited("Rejecting underflow/overflow"
+                                                  " for WRITE data CDB\n");
+                               return TCM_INVALID_CDB_FIELD;
+                       }
+                       /*
+                        * Some fabric drivers like iscsi-target still expect to
+                        * always reject overflow writes.  Reject this case until
+                        * full fabric driver level support for overflow writes
+                        * is introduced tree-wide.
+                        */
+                       if (size > cmd->data_length) {
+                               pr_err_ratelimited("Rejecting overflow for"
+                                                  " WRITE control CDB\n");
+                               return TCM_INVALID_CDB_FIELD;
+                       }
                }
                /*
                 * Reject READ_* or WRITE_* with overflow/underflow for
index 9045837f748bd3b602256cfa9e83058ae92b8b33..beb5f098f32d6f7bb5851deb810065ab37e4ac4a 100644 (file)
@@ -97,7 +97,7 @@ struct tcmu_hba {
 
 struct tcmu_dev {
        struct list_head node;
-
+       struct kref kref;
        struct se_device se_dev;
 
        char *name;
@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
        if (!udev)
                return NULL;
+       kref_init(&udev->kref);
 
        udev->name = kstrdup(name, GFP_KERNEL);
        if (!udev->name) {
@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
        return 0;
 }
 
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct tcmu_dev *udev = TCMU_DEV(dev);
+
+       kfree(udev->uio_info.name);
+       kfree(udev->name);
+       kfree(udev);
+}
+
+static void tcmu_dev_kref_release(struct kref *kref)
+{
+       struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
+       struct se_device *dev = &udev->se_dev;
+
+       call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
 static int tcmu_release(struct uio_info *info, struct inode *inode)
 {
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
        clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
 
        pr_debug("close\n");
-
+       /* release ref from configure */
+       kref_put(&udev->kref, tcmu_dev_kref_release);
        return 0;
 }
 
@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev)
                dev->dev_attrib.hw_max_sectors = 128;
        dev->dev_attrib.hw_queue_depth = 128;
 
+       /*
+        * Get a ref incase userspace does a close on the uio device before
+        * LIO has initiated tcmu_free_device.
+        */
+       kref_get(&udev->kref);
+
        ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
                                 udev->uio_info.uio_dev->minor);
        if (ret)
@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev)
        return 0;
 
 err_netlink:
+       kref_put(&udev->kref, tcmu_dev_kref_release);
        uio_unregister_device(&udev->uio_info);
 err_register:
        vfree(udev->mb_addr);
 err_vzalloc:
        kfree(info->name);
+       info->name = NULL;
 
        return ret;
 }
@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
        return -EINVAL;
 }
 
-static void tcmu_dev_call_rcu(struct rcu_head *p)
-{
-       struct se_device *dev = container_of(p, struct se_device, rcu_head);
-       struct tcmu_dev *udev = TCMU_DEV(dev);
-
-       kfree(udev);
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
        return udev->uio_info.uio_dev ? true : false;
@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev)
                                   udev->uio_info.uio_dev->minor);
 
                uio_unregister_device(&udev->uio_info);
-               kfree(udev->uio_info.name);
-               kfree(udev->name);
        }
-       call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+
+       /* release ref from init */
+       kref_put(&udev->kref, tcmu_dev_kref_release);
 }
 
 enum {
index 4fb3165384c4495bb83a785dc564f76be68e340a..6b137194069fee47bc45b1866865ee27adfd33c5 100644 (file)
@@ -34,9 +34,7 @@ static int tty_port_default_receive_buf(struct tty_port *port,
        if (!disc)
                return 0;
 
-       mutex_lock(&tty->atomic_write_lock);
        ret = tty_ldisc_receive_buf(disc, p, (char *)f, count);
-       mutex_unlock(&tty->atomic_write_lock);
 
        tty_ldisc_deref(disc);
 
index 7a92a5e1d40c6f17227936ee2b6925286deab511..feca75b07fddce01e6e121542d3e0b74d321f85f 100644 (file)
@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
                                st->global_error = 1;
                }
        }
-       st->va += PAGE_SIZE * nr;
-       st->index += nr;
+       st->va += XEN_PAGE_SIZE * nr;
+       st->index += nr / XEN_PFN_PER_PAGE;
 
        return 0;
 }
index c22eaf162f95c1456563b31a8362da9e531c9185..2a6889b3585f068c73091d8895639b7e941d702a 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1154,6 +1154,17 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
                goto out;
        }
 
+       /*
+        * It is possible, particularly with mixed reads & writes to private
+        * mappings, that we have raced with a PMD fault that overlaps with
+        * the PTE we need to set up.  If so just return and the fault will be
+        * retried.
+        */
+       if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+               vmf_ret = VM_FAULT_NOPAGE;
+               goto unlock_entry;
+       }
+
        /*
         * Note that we don't bother to use iomap_apply here: DAX required
         * the file system block size to be equal the page size, which means
@@ -1397,6 +1408,18 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
        if (IS_ERR(entry))
                goto fallback;
 
+       /*
+        * It is possible, particularly with mixed reads & writes to private
+        * mappings, that we have raced with a PTE fault that overlaps with
+        * the PMD we need to set up.  If so just return and the fault will be
+        * retried.
+        */
+       if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
+                       !pmd_devmap(*vmf->pmd)) {
+               result = 0;
+               goto unlock_entry;
+       }
+
        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
index f865b96374df2b5c40ecfb13663154499ec09b31..d2955daf17a4fcefa2ded3a412f67732315de12a 100644 (file)
@@ -659,7 +659,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
-       int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
+       int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
        lh = page_address(page);
index f5714ee01000de49c5efcd3675226a3a3b7a7074..23542dc44a25c9f398b8a2a69905bf9eafbe5270 100644 (file)
@@ -454,6 +454,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
                        goto out_err_free;
 
                /* fh */
+               rc = -EIO;
                p = xdr_inline_decode(&stream, 4);
                if (!p)
                        goto out_err_free;
index e9b4c3320e371a90020ea25f4be44d2d76508db7..3e24392f2caa1296c4475ed84701fdfabfff467f 100644 (file)
@@ -398,7 +398,6 @@ extern struct file_system_type nfs4_referral_fs_type;
 bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
 struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
                        struct nfs_subversion *);
-void nfs_initialise_sb(struct super_block *);
 int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
 int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
 struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
@@ -458,7 +457,6 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
 
 /* super.c */
-void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
 void nfs_umount_begin(struct super_block *);
 int  nfs_statfs(struct dentry *, struct kstatfs *);
 int  nfs_show_options(struct seq_file *, struct dentry *);
index 1a224a33a6c23c362e1bbacb8150bb9bbf02b3fd..e5686be67be8d361a32344e3aaaae235d739ffd7 100644 (file)
@@ -246,7 +246,7 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
 
        devname = nfs_devname(dentry, page, PAGE_SIZE);
        if (IS_ERR(devname))
-               mnt = (struct vfsmount *)devname;
+               mnt = ERR_CAST(devname);
        else
                mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata);
 
index 929d09a5310ad7df79be527a45a9aa524825b7f0..319a47db218d133d36b749641b63e5fa4489014c 100644 (file)
@@ -177,7 +177,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
        if (status)
                goto out;
 
-       if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
+       if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
                                    &res->commit_res.verf->verifier)) {
                status = -EAGAIN;
                goto out;
index 692a7a8bfc7afd05ad40d6e04a4a53db07e01753..66776f02211131bd003e31748dec6774a02a077f 100644 (file)
@@ -582,7 +582,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
                         */
                        nfs4_schedule_path_down_recovery(pos);
                default:
-                       spin_lock(&nn->nfs_client_lock);
                        goto out;
                }
 
index adc6ec28d4b59d3181c76ca8f33a9fed25d68ce9..c383d0913b54c90fb96020a62faaf989a5d946b3 100644 (file)
@@ -2094,12 +2094,26 @@ pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
 
+/*
+ * Check for any intersection between the request and the pgio->pg_lseg,
+ * and if none, put this pgio->pg_lseg away.
+ */
+static void
+pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+       if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
+               pnfs_put_lseg(pgio->pg_lseg);
+               pgio->pg_lseg = NULL;
+       }
+}
+
 void
 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
 {
        u64 rd_size = req->wb_bytes;
 
        pnfs_generic_pg_check_layout(pgio);
+       pnfs_generic_pg_check_range(pgio, req);
        if (pgio->pg_lseg == NULL) {
                if (pgio->pg_dreq == NULL)
                        rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
@@ -2131,6 +2145,7 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
                           struct nfs_page *req, u64 wb_size)
 {
        pnfs_generic_pg_check_layout(pgio);
+       pnfs_generic_pg_check_range(pgio, req);
        if (pgio->pg_lseg == NULL) {
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   req->wb_context,
@@ -2191,16 +2206,10 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
                seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
                                     pgio->pg_lseg->pls_range.length);
                req_start = req_offset(req);
-               WARN_ON_ONCE(req_start >= seg_end);
+
                /* start of request is past the last byte of this segment */
-               if (req_start >= seg_end) {
-                       /* reference the new lseg */
-                       if (pgio->pg_ops->pg_cleanup)
-                               pgio->pg_ops->pg_cleanup(pgio);
-                       if (pgio->pg_ops->pg_init)
-                               pgio->pg_ops->pg_init(pgio, req);
+               if (req_start >= seg_end)
                        return 0;
-               }
 
                /* adjust 'size' iff there are fewer bytes left in the
                 * segment than what nfs_generic_pg_test returned */
index 2d05b756a8d6504e79796d71eb5471a578d9ef5a..99731e3e332f3ec32eec26cca47556193dcf68fe 100644 (file)
@@ -593,6 +593,16 @@ pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
        return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2);
 }
 
+static inline bool
+pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req)
+{
+       u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length);
+       u64 req_last = req_offset(req) + req->wb_bytes;
+
+       return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last,
+                               req_offset(req), req_last);
+}
+
 extern unsigned int layoutstats_timer;
 
 #ifdef NFS_DEBUG
index 2f3822a4a7d565e9e2e607b61d2c580c1befdd05..eceb4eabb064953f830d1bf97a5d0daeecdd27f0 100644 (file)
@@ -2301,7 +2301,7 @@ EXPORT_SYMBOL_GPL(nfs_remount);
 /*
  * Initialise the common bits of the superblock
  */
-inline void nfs_initialise_sb(struct super_block *sb)
+static void nfs_initialise_sb(struct super_block *sb)
 {
        struct nfs_server *server = NFS_SB(sb);
 
@@ -2348,7 +2348,8 @@ EXPORT_SYMBOL_GPL(nfs_fill_super);
 /*
  * Finish setting up a cloned NFS2/3/4 superblock
  */
-void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+static void nfs_clone_super(struct super_block *sb,
+                           struct nfs_mount_info *mount_info)
 {
        const struct super_block *old_sb = mount_info->cloned->sb;
        struct nfs_server *server = NFS_SB(sb);
index 12feac6ee2fd461a46c7b06b7a0ed0359fb4dfd1..452334694a5d1f37cc480e5d1cf2873c4246019d 100644 (file)
@@ -334,11 +334,8 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
        if (!p)
                return 0;
        p = xdr_decode_hyper(p, &args->offset);
-       args->count = ntohl(*p++);
-
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
 
+       args->count = ntohl(*p++);
        len = min(args->count, max_blocksize);
 
        /* set up the kvec */
@@ -352,7 +349,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
                v++;
        }
        args->vlen = v;
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -544,11 +541,9 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
        p = decode_fh(p, &args->fh);
        if (!p)
                return 0;
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -574,14 +569,10 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
        args->verf   = p; p += 2;
        args->dircount = ~0;
        args->count  = ntohl(*p++);
-
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        args->count  = min_t(u32, args->count, PAGE_SIZE);
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -599,9 +590,6 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
        args->dircount = ntohl(*p++);
        args->count    = ntohl(*p++);
 
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        len = args->count = min(args->count, max_blocksize);
        while (len > 0) {
                struct page *p = *(rqstp->rq_next_page++);
@@ -609,7 +597,8 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
                        args->buffer = page_address(p);
                len -= PAGE_SIZE;
        }
-       return 1;
+
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
index c453a1998e003d3e900407b266f1a15de5d5d94b..dadb3bf305b22f352a3f91a2df06b30284b4891c 100644 (file)
@@ -1769,6 +1769,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
                        opdesc->op_get_currentstateid(cstate, &op->u);
                op->status = opdesc->op_func(rqstp, cstate, &op->u);
 
+               /* Only from SEQUENCE */
+               if (cstate->status == nfserr_replay_cache) {
+                       dprintk("%s NFS4.1 replay from cache\n", __func__);
+                       status = op->status;
+                       goto out;
+               }
                if (!op->status) {
                        if (opdesc->op_set_currentstateid)
                                opdesc->op_set_currentstateid(cstate, &op->u);
@@ -1779,14 +1785,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
                        if (need_wrongsec_check(rqstp))
                                op->status = check_nfsd_access(current_fh->fh_export, rqstp);
                }
-
 encode_op:
-               /* Only from SEQUENCE */
-               if (cstate->status == nfserr_replay_cache) {
-                       dprintk("%s NFS4.1 replay from cache\n", __func__);
-                       status = op->status;
-                       goto out;
-               }
                if (op->status == nfserr_replay_me) {
                        op->replay = &cstate->replay_owner->so_replay;
                        nfsd4_encode_replay(&resp->xdr, op);
index 6a4947a3f4fa82be4118e4ed538a171118f4baa8..de07ff625777820fefc98bfa56adea81962e8135 100644 (file)
@@ -257,9 +257,6 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
        len = args->count     = ntohl(*p++);
        p++; /* totalcount - unused */
 
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2);
 
        /* set up somewhere to store response.
@@ -275,7 +272,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
                v++;
        }
        args->vlen = v;
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -365,11 +362,9 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli
        p = decode_fh(p, &args->fh);
        if (!p)
                return 0;
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -407,11 +402,9 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
        args->cookie = ntohl(*p++);
        args->count  = ntohl(*p++);
        args->count  = min_t(u32, args->count, PAGE_SIZE);
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 /*
index 358258364616cd3c2fee997daca2a192719cb045..4690cd75d8d7948a056fe899bc4600ade10b8566 100644 (file)
@@ -159,7 +159,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
                                        PTR_ERR(dent_inode));
                kfree(name);
                /* Return the error code. */
-               return (struct dentry *)dent_inode;
+               return ERR_CAST(dent_inode);
        }
        /* It is guaranteed that @name is no longer allocated at this point. */
        if (MREF_ERR(mref) == -ENOENT) {
index 827fc9809bc271f09b2c3b7abf4019c31d0e1636..9f88188060db9c7fa59e6882ecf33b55cf921788 100644 (file)
@@ -119,7 +119,7 @@ check_err:
 
        if (IS_ERR(inode)) {
                mlog_errno(PTR_ERR(inode));
-               result = (void *)inode;
+               result = ERR_CAST(inode);
                goto bail;
        }
 
index 0daac5112f7a32384b5febd39bd8499f31da8c31..c0c9683934b7a7883ab59eb8bbcd412e07385d0b 100644 (file)
@@ -1,5 +1,6 @@
 config OVERLAY_FS
        tristate "Overlay filesystem support"
+       select EXPORTFS
        help
          An overlay filesystem combines two filesystems - an 'upper' filesystem
          and a 'lower' filesystem.  When a name exists in both filesystems, the
index 9008ab9fbd2ebe89d419c249455eb740c48a9eb1..7a44533f4bbf24134a95bdc030bde5779f28457a 100644 (file)
@@ -300,7 +300,11 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
                        return PTR_ERR(fh);
        }
 
-       err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0);
+       /*
+        * Do not fail when upper doesn't support xattrs.
+        */
+       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh,
+                                fh ? fh->len : 0, 0);
        kfree(fh);
 
        return err;
@@ -342,13 +346,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
        if (tmpfile)
                temp = ovl_do_tmpfile(upperdir, stat->mode);
        else
-               temp = ovl_lookup_temp(workdir, dentry);
-       err = PTR_ERR(temp);
-       if (IS_ERR(temp))
-               goto out1;
-
+               temp = ovl_lookup_temp(workdir);
        err = 0;
-       if (!tmpfile)
+       if (IS_ERR(temp)) {
+               err = PTR_ERR(temp);
+               temp = NULL;
+       }
+
+       if (!err && !tmpfile)
                err = ovl_create_real(wdir, temp, &cattr, NULL, true);
 
        if (new_creds) {
@@ -454,6 +459,11 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
        ovl_path_upper(parent, &parentpath);
        upperdir = parentpath.dentry;
 
+       /* Mark parent "impure" because it may now contain non-pure upper */
+       err = ovl_set_impure(parent, upperdir);
+       if (err)
+               return err;
+
        err = vfs_getattr(&parentpath, &pstat,
                          STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT);
        if (err)
index 723b98b9069876d1656b74735dabcaf01484e26d..a63a71656e9bdaef6ed5cadf8acdb6d8002fe1b6 100644 (file)
@@ -41,7 +41,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
        }
 }
 
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
+struct dentry *ovl_lookup_temp(struct dentry *workdir)
 {
        struct dentry *temp;
        char name[20];
@@ -68,7 +68,7 @@ static struct dentry *ovl_whiteout(struct dentry *workdir,
        struct dentry *whiteout;
        struct inode *wdir = workdir->d_inode;
 
-       whiteout = ovl_lookup_temp(workdir, dentry);
+       whiteout = ovl_lookup_temp(workdir);
        if (IS_ERR(whiteout))
                return whiteout;
 
@@ -127,17 +127,28 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
        return err;
 }
 
-static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper,
+                              int xerr)
 {
        int err;
 
-       err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
+       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE, "y", 1, xerr);
        if (!err)
                ovl_dentry_set_opaque(dentry);
 
        return err;
 }
 
+static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+{
+       /*
+        * Fail with -EIO when trying to create opaque dir and upper doesn't
+        * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to
+        * return a specific error for noxattr case.
+        */
+       return ovl_set_opaque_xerr(dentry, upperdentry, -EIO);
+}
+
 /* Common operations required to be done after creation of file on upper */
 static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
                            struct dentry *newdentry, bool hardlink)
@@ -162,6 +173,11 @@ static bool ovl_type_merge(struct dentry *dentry)
        return OVL_TYPE_MERGE(ovl_path_type(dentry));
 }
 
+static bool ovl_type_origin(struct dentry *dentry)
+{
+       return OVL_TYPE_ORIGIN(ovl_path_type(dentry));
+}
+
 static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
                            struct cattr *attr, struct dentry *hardlink)
 {
@@ -250,7 +266,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
        if (upper->d_parent->d_inode != udir)
                goto out_unlock;
 
-       opaquedir = ovl_lookup_temp(workdir, dentry);
+       opaquedir = ovl_lookup_temp(workdir);
        err = PTR_ERR(opaquedir);
        if (IS_ERR(opaquedir))
                goto out_unlock;
@@ -382,7 +398,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        if (err)
                goto out;
 
-       newdentry = ovl_lookup_temp(workdir, dentry);
+       newdentry = ovl_lookup_temp(workdir);
        err = PTR_ERR(newdentry);
        if (IS_ERR(newdentry))
                goto out_unlock;
@@ -846,18 +862,16 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
        if (IS_ERR(redirect))
                return PTR_ERR(redirect);
 
-       err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT,
-                             redirect, strlen(redirect), 0);
+       err = ovl_check_setxattr(dentry, ovl_dentry_upper(dentry),
+                                OVL_XATTR_REDIRECT,
+                                redirect, strlen(redirect), -EXDEV);
        if (!err) {
                spin_lock(&dentry->d_lock);
                ovl_dentry_set_redirect(dentry, redirect);
                spin_unlock(&dentry->d_lock);
        } else {
                kfree(redirect);
-               if (err == -EOPNOTSUPP)
-                       ovl_clear_redirect_dir(dentry->d_sb);
-               else
-                       pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
+               pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
                /* Fall back to userspace copy-up */
                err = -EXDEV;
        }
@@ -943,6 +957,25 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
        old_upperdir = ovl_dentry_upper(old->d_parent);
        new_upperdir = ovl_dentry_upper(new->d_parent);
 
+       if (!samedir) {
+               /*
+                * When moving a merge dir or non-dir with copy up origin into
+                * a new parent, we are marking the new parent dir "impure".
+                * When ovl_iterate() iterates an "impure" upper dir, it will
+                * lookup the origin inodes of the entries to fill d_ino.
+                */
+               if (ovl_type_origin(old)) {
+                       err = ovl_set_impure(new->d_parent, new_upperdir);
+                       if (err)
+                               goto out_revert_creds;
+               }
+               if (!overwrite && ovl_type_origin(new)) {
+                       err = ovl_set_impure(old->d_parent, old_upperdir);
+                       if (err)
+                               goto out_revert_creds;
+               }
+       }
+
        trap = lock_rename(new_upperdir, old_upperdir);
 
        olddentry = lookup_one_len(old->d_name.name, old_upperdir,
@@ -992,7 +1025,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                if (ovl_type_merge_or_lower(old))
                        err = ovl_set_redirect(old, samedir);
                else if (!old_opaque && ovl_type_merge(new->d_parent))
-                       err = ovl_set_opaque(old, olddentry);
+                       err = ovl_set_opaque_xerr(old, olddentry, -EXDEV);
                if (err)
                        goto out_dput;
        }
@@ -1000,7 +1033,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                if (ovl_type_merge_or_lower(new))
                        err = ovl_set_redirect(new, samedir);
                else if (!new_opaque && ovl_type_merge(old->d_parent))
-                       err = ovl_set_opaque(new, newdentry);
+                       err = ovl_set_opaque_xerr(new, newdentry, -EXDEV);
                if (err)
                        goto out_dput;
        }
index ad9547f82da57fa4bd51eb5738cb8f02235e4455..d613e2c41242a52a6c018f43f9987bdbf461e0bb 100644 (file)
@@ -240,6 +240,16 @@ int ovl_xattr_get(struct dentry *dentry, const char *name,
        return res;
 }
 
+static bool ovl_can_list(const char *s)
+{
+       /* List all non-trusted xatts */
+       if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+               return true;
+
+       /* Never list trusted.overlay, list other trusted for superuser only */
+       return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+}
+
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
@@ -263,7 +273,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
                        return -EIO;
 
                len -= slen;
-               if (ovl_is_private_xattr(s)) {
+               if (!ovl_can_list(s)) {
                        res -= slen;
                        memmove(s, s + slen, len);
                } else {
index bad0f665a63521efde00b4c488d4ed2ba85a5b75..f3136c31e72af24cbb9949449a12d292fc3bf11b 100644 (file)
@@ -169,17 +169,7 @@ invalid:
 
 static bool ovl_is_opaquedir(struct dentry *dentry)
 {
-       int res;
-       char val;
-
-       if (!d_is_dir(dentry))
-               return false;
-
-       res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
-       if (res == 1 && val == 'y')
-               return true;
-
-       return false;
+       return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE);
 }
 
 static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
@@ -351,6 +341,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
        unsigned int ctr = 0;
        struct inode *inode = NULL;
        bool upperopaque = false;
+       bool upperimpure = false;
        char *upperredirect = NULL;
        struct dentry *this;
        unsigned int i;
@@ -395,6 +386,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                                poe = roe;
                }
                upperopaque = d.opaque;
+               if (upperdentry && d.is_dir)
+                       upperimpure = ovl_is_impuredir(upperdentry);
        }
 
        if (!d.stop && poe->numlower) {
@@ -463,6 +456,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
        revert_creds(old_cred);
        oe->opaque = upperopaque;
+       oe->impure = upperimpure;
        oe->redirect = upperredirect;
        oe->__upperdentry = upperdentry;
        memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
index caa36cb9c46de9838805dc40e21672217407d40e..0623cebeefff8661d49d65a228ceec6290cee877 100644 (file)
@@ -24,6 +24,7 @@ enum ovl_path_type {
 #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
 #define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect"
 #define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin"
+#define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure"
 
 /*
  * The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
@@ -203,10 +204,10 @@ struct dentry *ovl_dentry_real(struct dentry *dentry);
 struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
 void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
 bool ovl_dentry_is_opaque(struct dentry *dentry);
+bool ovl_dentry_is_impure(struct dentry *dentry);
 bool ovl_dentry_is_whiteout(struct dentry *dentry);
 void ovl_dentry_set_opaque(struct dentry *dentry);
 bool ovl_redirect_dir(struct super_block *sb);
-void ovl_clear_redirect_dir(struct super_block *sb);
 const char *ovl_dentry_get_redirect(struct dentry *dentry);
 void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
 void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
@@ -219,6 +220,17 @@ bool ovl_is_whiteout(struct dentry *dentry);
 struct file *ovl_path_open(struct path *path, int flags);
 int ovl_copy_up_start(struct dentry *dentry);
 void ovl_copy_up_end(struct dentry *dentry);
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name);
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+                      const char *name, const void *value, size_t size,
+                      int xerr);
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
+
+static inline bool ovl_is_impuredir(struct dentry *dentry)
+{
+       return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
+}
+
 
 /* namei.c */
 int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
@@ -263,7 +275,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
 
 /* dir.c */
 extern const struct inode_operations ovl_dir_inode_operations;
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry);
+struct dentry *ovl_lookup_temp(struct dentry *workdir);
 struct cattr {
        dev_t rdev;
        umode_t mode;
index b2023ddb85323725b8bbfa687f31fc854c1ba5e9..34bc4a9f5c61d95f049b3f34ccd0de243aa27ddd 100644 (file)
@@ -28,6 +28,7 @@ struct ovl_fs {
        /* creds of process who forced instantiation of super block */
        const struct cred *creator_cred;
        bool tmpfile;
+       bool noxattr;
        wait_queue_head_t copyup_wq;
        /* sb common to all layers */
        struct super_block *same_sb;
@@ -42,6 +43,7 @@ struct ovl_entry {
                        u64 version;
                        const char *redirect;
                        bool opaque;
+                       bool impure;
                        bool copying;
                };
                struct rcu_head rcu;
index 9828b7de89992e64a1900a277b58423dde7b992a..4882ffb37baead1c4da41684158d22cbe58e5353 100644 (file)
@@ -891,6 +891,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                                dput(temp);
                        else
                                pr_warn("overlayfs: upper fs does not support tmpfile.\n");
+
+                       /*
+                        * Check if upper/work fs supports trusted.overlay.*
+                        * xattr
+                        */
+                       err = ovl_do_setxattr(ufs->workdir, OVL_XATTR_OPAQUE,
+                                             "0", 1, 0);
+                       if (err) {
+                               ufs->noxattr = true;
+                               pr_warn("overlayfs: upper fs does not support xattr.\n");
+                       } else {
+                               vfs_removexattr(ufs->workdir, OVL_XATTR_OPAQUE);
+                       }
                }
        }
 
@@ -961,7 +974,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        path_put(&workpath);
        kfree(lowertmp);
 
-       oe->__upperdentry = upperpath.dentry;
+       if (upperpath.dentry) {
+               oe->__upperdentry = upperpath.dentry;
+               oe->impure = ovl_is_impuredir(upperpath.dentry);
+       }
        for (i = 0; i < numlower; i++) {
                oe->lowerstack[i].dentry = stack[i].dentry;
                oe->lowerstack[i].mnt = ufs->lower_mnt[i];
index cfdea47313a10e22a9c06193e4cc422891badaae..809048913889189d083339d1d015ef4cad2af035 100644 (file)
@@ -175,6 +175,13 @@ bool ovl_dentry_is_opaque(struct dentry *dentry)
        return oe->opaque;
 }
 
+bool ovl_dentry_is_impure(struct dentry *dentry)
+{
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       return oe->impure;
+}
+
 bool ovl_dentry_is_whiteout(struct dentry *dentry)
 {
        return !dentry->d_inode && ovl_dentry_is_opaque(dentry);
@@ -191,14 +198,7 @@ bool ovl_redirect_dir(struct super_block *sb)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
 
-       return ofs->config.redirect_dir;
-}
-
-void ovl_clear_redirect_dir(struct super_block *sb)
-{
-       struct ovl_fs *ofs = sb->s_fs_info;
-
-       ofs->config.redirect_dir = false;
+       return ofs->config.redirect_dir && !ofs->noxattr;
 }
 
 const char *ovl_dentry_get_redirect(struct dentry *dentry)
@@ -303,3 +303,59 @@ void ovl_copy_up_end(struct dentry *dentry)
        wake_up_locked(&ofs->copyup_wq);
        spin_unlock(&ofs->copyup_wq.lock);
 }
+
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name)
+{
+       int res;
+       char val;
+
+       if (!d_is_dir(dentry))
+               return false;
+
+       res = vfs_getxattr(dentry, name, &val, 1);
+       if (res == 1 && val == 'y')
+               return true;
+
+       return false;
+}
+
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+                      const char *name, const void *value, size_t size,
+                      int xerr)
+{
+       int err;
+       struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+
+       if (ofs->noxattr)
+               return xerr;
+
+       err = ovl_do_setxattr(upperdentry, name, value, size, 0);
+
+       if (err == -EOPNOTSUPP) {
+               pr_warn("overlayfs: cannot set %s xattr on upper\n", name);
+               ofs->noxattr = true;
+               return xerr;
+       }
+
+       return err;
+}
+
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
+{
+       int err;
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       if (oe->impure)
+               return 0;
+
+       /*
+        * Do not fail when upper doesn't support xattrs.
+        * Upper inodes won't have origin nor redirect xattr anyway.
+        */
+       err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE,
+                                "y", 1, 0);
+       if (!err)
+               oe->impure = true;
+
+       return err;
+}
index 45f6bf68fff3ed30df0f85d98c0f644c320a9bf7..f1e1927ccd484e7372fe2a38db7455468bbf06e8 100644 (file)
@@ -821,7 +821,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
        if (!mmget_not_zero(mm))
                goto free;
 
-       flags = write ? FOLL_WRITE : 0;
+       flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
 
        while (count > 0) {
                int this_len = min_t(int, count, PAGE_SIZE);
index da01f497180a165d163935c4744ef82521bf9151..39bb1e838d8da683fa64dadf7ff150b9369d699e 100644 (file)
@@ -1112,7 +1112,7 @@ static int flush_commit_list(struct super_block *s,
                depth = reiserfs_write_unlock_nested(s);
                if (reiserfs_barrier_flush(s))
                        __sync_dirty_buffer(jl->j_commit_bh,
-                                       REQ_PREFLUSH | REQ_FUA);
+                                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(jl->j_commit_bh);
                reiserfs_write_lock_nested(s, depth);
@@ -1271,7 +1271,7 @@ static int _update_journal_header_block(struct super_block *sb,
 
                if (reiserfs_barrier_flush(sb))
                        __sync_dirty_buffer(journal->j_header_bh,
-                                       REQ_PREFLUSH | REQ_FUA);
+                                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(journal->j_header_bh);
 
index 131b2b77c8185403dc3cdf0393e8e0c915f3a850..29ecaf739449c4036e6ed3ebed5499b8489079c4 100644 (file)
@@ -812,9 +812,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
        uspi->s_dirblksize = UFS_SECTOR_SIZE;
        super_block_offset=UFS_SBLOCK;
 
-       /* Keep 2Gig file limit. Some UFS variants need to override 
-          this but as I don't know which I'll let those in the know loosen
-          the rules */
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
+
        switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
        case UFS_MOUNT_UFSTYPE_44BSD:
                UFSD("ufstype=44bsd\n");
index 62fa39276a24bd91c26e3aa18c9f162f19842b95..07b77b73b0240c5cca4187d29e8e403cbf4f0714 100644 (file)
@@ -97,12 +97,16 @@ static inline void
 xfs_buf_ioacct_inc(
        struct xfs_buf  *bp)
 {
-       if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+       if (bp->b_flags & XBF_NO_IOACCT)
                return;
 
        ASSERT(bp->b_flags & XBF_ASYNC);
-       bp->b_flags |= _XBF_IN_FLIGHT;
-       percpu_counter_inc(&bp->b_target->bt_io_count);
+       spin_lock(&bp->b_lock);
+       if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
+               bp->b_state |= XFS_BSTATE_IN_FLIGHT;
+               percpu_counter_inc(&bp->b_target->bt_io_count);
+       }
+       spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -110,14 +114,24 @@ xfs_buf_ioacct_inc(
  * freed and unaccount from the buftarg.
  */
 static inline void
-xfs_buf_ioacct_dec(
+__xfs_buf_ioacct_dec(
        struct xfs_buf  *bp)
 {
-       if (!(bp->b_flags & _XBF_IN_FLIGHT))
-               return;
+       ASSERT(spin_is_locked(&bp->b_lock));
 
-       bp->b_flags &= ~_XBF_IN_FLIGHT;
-       percpu_counter_dec(&bp->b_target->bt_io_count);
+       if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
+               bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+               percpu_counter_dec(&bp->b_target->bt_io_count);
+       }
+}
+
+static inline void
+xfs_buf_ioacct_dec(
+       struct xfs_buf  *bp)
+{
+       spin_lock(&bp->b_lock);
+       __xfs_buf_ioacct_dec(bp);
+       spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -149,9 +163,9 @@ xfs_buf_stale(
         * unaccounted (released to LRU) before that occurs. Drop in-flight
         * status now to preserve accounting consistency.
         */
-       xfs_buf_ioacct_dec(bp);
-
        spin_lock(&bp->b_lock);
+       __xfs_buf_ioacct_dec(bp);
+
        atomic_set(&bp->b_lru_ref, 0);
        if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
            (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -979,12 +993,12 @@ xfs_buf_rele(
                 * ensures the decrement occurs only once per-buf.
                 */
                if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
-                       xfs_buf_ioacct_dec(bp);
+                       __xfs_buf_ioacct_dec(bp);
                goto out_unlock;
        }
 
        /* the last reference has been dropped ... */
-       xfs_buf_ioacct_dec(bp);
+       __xfs_buf_ioacct_dec(bp);
        if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
                /*
                 * If the buffer is added to the LRU take a new reference to the
index 8d1d44f87ce98834ad67cee6840d96d07d5666c0..1508121f29f29191da1a4efc7c8f12cf42eb0107 100644 (file)
@@ -63,7 +63,6 @@ typedef enum {
 #define _XBF_KMEM       (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q   (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND   (1 << 23)/* compound buffer */
-#define _XBF_IN_FLIGHT  (1 << 25) /* I/O in flight, for accounting purposes */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -84,14 +83,14 @@ typedef unsigned int xfs_buf_flags_t;
        { _XBF_PAGES,           "PAGES" }, \
        { _XBF_KMEM,            "KMEM" }, \
        { _XBF_DELWRI_Q,        "DELWRI_Q" }, \
-       { _XBF_COMPOUND,        "COMPOUND" }, \
-       { _XBF_IN_FLIGHT,       "IN_FLIGHT" }
+       { _XBF_COMPOUND,        "COMPOUND" }
 
 
 /*
  * Internal state flags.
  */
 #define XFS_BSTATE_DISPOSE      (1 << 0)       /* buffer being discarded */
+#define XFS_BSTATE_IN_FLIGHT    (1 << 1)       /* I/O in flight */
 
 /*
  * The xfs_buftarg contains 2 notions of "sector size" -
index c0bd0d7651a947bf06407d7c680dde5c377b9b26..bb837310c07e98c529472abceda0a8b426d8c9a5 100644 (file)
@@ -913,4 +913,55 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux);
 int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
 int drm_dp_stop_crc(struct drm_dp_aux *aux);
 
+struct drm_dp_dpcd_ident {
+       u8 oui[3];
+       u8 device_id[6];
+       u8 hw_rev;
+       u8 sw_major_rev;
+       u8 sw_minor_rev;
+} __packed;
+
+/**
+ * struct drm_dp_desc - DP branch/sink device descriptor
+ * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
+ * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
+ */
+struct drm_dp_desc {
+       struct drm_dp_dpcd_ident ident;
+       u32 quirks;
+};
+
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+                    bool is_branch);
+
+/**
+ * enum drm_dp_quirk - Display Port sink/branch device specific quirks
+ *
+ * Display Port sink and branch devices in the wild have a variety of bugs, try
+ * to collect them here. The quirks are shared, but it's up to the drivers to
+ * implement workarounds for them.
+ */
+enum drm_dp_quirk {
+       /**
+        * @DP_DPCD_QUIRK_LIMITED_M_N:
+        *
+        * The device requires main link attributes Mvid and Nvid to be limited
+        * to 16 bits.
+        */
+       DP_DPCD_QUIRK_LIMITED_M_N,
+};
+
+/**
+ * drm_dp_has_quirk() - does the DP device have a specific quirk
+ * @desc: Device decriptor filled by drm_dp_read_desc()
+ * @quirk: Quirk to query for
+ *
+ * Return true if DP device identified by @desc has @quirk.
+ */
+static inline bool
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+{
+       return desc->quirks & BIT(quirk);
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
index 21745946cae154f53cd87311e9350465388f70a5..ec47101cb1bf80f0867dbcff1d6aa10878df7418 100644 (file)
@@ -48,6 +48,7 @@ enum {
        CSS_ONLINE      = (1 << 1), /* between ->css_online() and ->css_offline() */
        CSS_RELEASED    = (1 << 2), /* refcnt reached zero, released */
        CSS_VISIBLE     = (1 << 3), /* css is visible to userland */
+       CSS_DYING       = (1 << 4), /* css is dying */
 };
 
 /* bits in struct cgroup flags field */
index ed2573e149faf070714e04f8160f7056b2fb1d3e..710a005c6b7a652bb9c32b5457dbd64196f6f4a0 100644 (file)
@@ -343,6 +343,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
        return true;
 }
 
+/**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline.  In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal.  cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed.  If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+       return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
+
 /**
  * css_put - put a css reference
  * @css: target css
index de179993e039d41d7e9034b5744be40954f81c09..ea9126006a69f94c7b8ea1705bf43bdf7b48e16c 100644 (file)
  * with any version that can compile the kernel
  */
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+/*
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function.  This turns out to avoid the need for complex #ifdef
+ * directives.  Suppress the warning in clang as well.
+ */
+#define inline inline __attribute__((unused))
index 9ec5e22846e0f302e7e1b1d174d9ead17e619ee3..0e306c5a86d6ee90debc824aa5d18e8f6d078f4d 100644 (file)
@@ -153,7 +153,7 @@ struct elevator_type
 #endif
 
        /* managed by elevator core */
-       char icq_cache_name[ELV_NAME_MAX + 5];  /* elvname + "_io_cq" */
+       char icq_cache_name[ELV_NAME_MAX + 6];  /* elvname + "_io_cq" */
        struct list_head list;
 };
 
index 2b1a44f5bdb60e6a28d874630f71dec7a0f4c40b..a89d37e8b3873cc8e6fa79389a6c0d3a2f4843ab 100644 (file)
@@ -41,7 +41,7 @@ struct vm_area_struct;
 #define ___GFP_WRITE           0x800000u
 #define ___GFP_KSWAPD_RECLAIM  0x1000000u
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP       0x4000000u
+#define ___GFP_NOLOCKDEP       0x2000000u
 #else
 #define ___GFP_NOLOCKDEP       0
 #endif
index c0d712d22b079ebc16129ef2618f41762276cf5e..f738d50cc17d3fcaa0b9b7cf681b70dd2646897d 100644 (file)
@@ -56,7 +56,14 @@ struct gpiod_lookup_table {
        .flags = _flags,                                                  \
 }
 
+#ifdef CONFIG_GPIOLIB
 void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
 void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
+#else
+static inline
+void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
+static inline
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
+#endif
 
 #endif /* __LINUX_GPIO_MACHINE_H */
index 36872fbb815d72203e14582e3dab6ba5051ee8a7..734377ad42e9f0e4719edc4750b57245fa2d3f17 100644 (file)
@@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
 
+#ifndef __jiffy_arch_data
+#define __jiffy_arch_data
+#endif
+
 /*
  * The 64-bit value is not atomic - you MUST NOT read it
  * without sampling the sequence number in jiffies_lock.
  * get_jiffies_64() will do this for you as appropriate.
  */
 extern u64 __cacheline_aligned_in_smp jiffies_64;
-extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
+extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void);
index 4ce24a3762627be20e805da3eaab26ba1bc47578..8098695e5d8d9dfba3815fd359823f92833a5d61 100644 (file)
@@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
 }
 #endif
 
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+               phys_addr_t end_addr);
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
        return 0;
 }
 
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+               phys_addr_t end_addr)
+{
+       return 0;
+}
+
 #endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
index b4ee8f62ce8da82720cb79e7a9ea3ec97703b849..8e2828d48d7fcf6a7ba683bf51c8c91a1ad28ec0 100644 (file)
@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
        u16     rate_val;
 };
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params);
index 32de0724b40009adc2a802dcc5abafbb5505c0b1..edafedb7b509010c904fccf2cdeffea1afa9317b 100644 (file)
@@ -766,6 +766,12 @@ enum {
        MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
 
+enum {
+       MLX5_CAP_UMR_FENCE_STRONG       = 0x0,
+       MLX5_CAP_UMR_FENCE_SMALL        = 0x1,
+       MLX5_CAP_UMR_FENCE_NONE         = 0x2,
+};
+
 struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_0[0x80];
 
@@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_202[0x1];
        u8         ipoib_enhanced_offloads[0x1];
        u8         ipoib_basic_offloads[0x1];
-       u8         reserved_at_205[0xa];
+       u8         reserved_at_205[0x5];
+       u8         umr_fence[0x2];
+       u8         reserved_at_20c[0x3];
        u8         drain_sigerr[0x1];
        u8         cmdif_checksum[0x2];
        u8         sigerr_cqe[0x1];
index 7cb17c6b97de38b1e8d55ca6d8c90b8415a9c3fa..b892e95d4929d311b51877dfb9eb3de67780bbdf 100644 (file)
@@ -2327,6 +2327,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
 #define FOLL_COW       0x4000  /* internal GUP flag */
 
+static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+{
+       if (vm_fault & VM_FAULT_OOM)
+               return -ENOMEM;
+       if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+               return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
+       if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+               return -EFAULT;
+       return 0;
+}
+
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
index ebaccd4e7d8cdc5f5ef13fed1475a8b202d93628..ef6a13b7bd3e851385bea32434e207a5cf6eec7f 100644 (file)
@@ -678,6 +678,7 @@ typedef struct pglist_data {
         * is the first PFN that needs to be initialised.
         */
        unsigned long first_deferred_pfn;
+       unsigned long static_init_size;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 566fda587fcf7a76af1c6a01e84ce33336606f3f..3f74ef2281e8afac1e4667b4fbf4abcc5f89ff17 100644 (file)
@@ -467,6 +467,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
+       DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
        DMI_BOARD_VERSION,
index 279e3c5326e3a4e65bf6f86556ea984a69d0ca9c..7620eb127cffc5edbc475457732a042bac357055 100644 (file)
@@ -42,8 +42,6 @@
  * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
  *     impedance to VDD). If the argument is != 0 pull-up is enabled,
  *     if it is 0, pull-up is total, i.e. the pin is connected to VDD.
- * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous
- *     input and output operations.
  * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
  *     collector) which means it is usually wired with other output ports
  *     which are then pulled up with an external resistor. Setting this
@@ -98,7 +96,6 @@ enum pin_config_param {
        PIN_CONFIG_BIAS_PULL_DOWN,
        PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
        PIN_CONFIG_BIAS_PULL_UP,
-       PIN_CONFIG_BIDIRECTIONAL,
        PIN_CONFIG_DRIVE_OPEN_DRAIN,
        PIN_CONFIG_DRIVE_OPEN_SOURCE,
        PIN_CONFIG_DRIVE_PUSH_PULL,
index 94631026f79c56f022976a85dcde92379507e87c..11cef5a7bc87a9fe67a4bfbfca9f52e41d0abd88 100644 (file)
@@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
 {
        char *cp = (char *)p;
        struct kvec *vec = &rqstp->rq_arg.head[0];
-       return cp == (char *)vec->iov_base + vec->iov_len;
+       return cp >= (char*)vec->iov_base
+               && cp <= (char*)vec->iov_base + vec->iov_len;
 }
 
 static inline int
index 0b1cf32edfd7ba1c456252124e23c68450d5bcc3..d9718378a8bee0b327d08c2e80a6fd3b5490b967 100644 (file)
@@ -189,8 +189,6 @@ struct platform_suspend_ops {
 struct platform_freeze_ops {
        int (*begin)(void);
        int (*prepare)(void);
-       void (*wake)(void);
-       void (*sync)(void);
        void (*restore)(void);
        void (*end)(void);
 };
@@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq;
 
 extern bool pm_wakeup_pending(void);
 extern void pm_system_wakeup(void);
-extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(void);
 extern void pm_system_irq_wakeup(unsigned int irq_number);
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 
 static inline bool pm_wakeup_pending(void) { return false; }
 static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(bool reset) {}
+static inline void pm_wakeup_clear(void) {}
 static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
 
 static inline void lock_system_sleep(void) {}
index eb50ce54b759154b99cf333e177adf8ca229c69b..413335c8cb529a8506a2f934577c3413512d8c97 100644 (file)
@@ -29,7 +29,7 @@ struct edid;
 struct cec_adapter;
 struct cec_notifier;
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
 
 /**
  * cec_notifier_get - find or create a new cec_notifier for the given device.
index b8eb895731d561a5f0ff4ec218055581ad6e21ee..bfa88d4d67e1d6663da4952a6a097e32f5e573a8 100644 (file)
@@ -173,7 +173,7 @@ struct cec_adapter {
        bool passthrough;
        struct cec_log_addrs log_addrs;
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
        struct cec_notifier *notifier;
 #endif
 
@@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
  */
 int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 void cec_register_cec_notifier(struct cec_adapter *adap,
                               struct cec_notifier *notifier);
 #endif
index dbf0abba33b8da21be05abf6e719f69542da80fc..3e505bbff8ca4a41f8d39fefcd59aa01b85424f4 100644 (file)
@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
  */
 extern const struct proto_ops inet6_stream_ops;
 extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
 
 struct group_source_req;
 struct group_filter;
index 38a7427ae902e35973a8b7fa0e95ff602ede0e87..be6223c586fa05b3ef1dbcb96e73cf7b5dae292d 100644 (file)
@@ -924,7 +924,7 @@ struct tcp_congestion_ops {
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        /* call when ack arrives (optional) */
        void (*in_ack_event)(struct sock *sk, u32 flags);
-       /* new value of cwnd after loss (optional) */
+       /* new value of cwnd after loss (required) */
        u32  (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
        void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
index f5f70e345318151356e022ce46b623a357031920..355b81f4242defd82a3802cd47e2a28abfb24ee5 100644 (file)
@@ -158,7 +158,6 @@ enum sa_path_rec_type {
 };
 
 struct sa_path_rec_ib {
-       __be64       service_id;
        __be16       dlid;
        __be16       slid;
        u8           raw_traffic;
@@ -174,7 +173,6 @@ struct sa_path_rec_roce {
 };
 
 struct sa_path_rec_opa {
-       __be64       service_id;
        __be32       dlid;
        __be32       slid;
        u8           raw_traffic;
@@ -189,6 +187,7 @@ struct sa_path_rec_opa {
 struct sa_path_rec {
        union ib_gid dgid;
        union ib_gid sgid;
+       __be64       service_id;
        /* reserved */
        __be32       flow_label;
        u8           hop_limit;
@@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib,
                ib->ib.dlid     = htons(ntohl(opa->opa.dlid));
                ib->ib.slid     = htons(ntohl(opa->opa.slid));
        }
-       ib->ib.service_id       = opa->opa.service_id;
+       ib->service_id          = opa->service_id;
        ib->ib.raw_traffic      = opa->opa.raw_traffic;
 }
 
@@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa,
        }
        opa->opa.slid           = slid;
        opa->opa.dlid           = dlid;
-       opa->opa.service_id     = ib->ib.service_id;
+       opa->service_id         = ib->service_id;
        opa->opa.raw_traffic    = ib->ib.raw_traffic;
 }
 
@@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
                (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
 }
 
-static inline void sa_path_set_service_id(struct sa_path_rec *rec,
-                                         __be64 service_id)
-{
-       if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-               rec->ib.service_id = service_id;
-       else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-               rec->opa.service_id = service_id;
-}
-
 static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
 {
        if (rec->rec_type == SA_PATH_REC_TYPE_IB)
@@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
                rec->opa.raw_traffic = raw_traffic;
 }
 
-static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec)
-{
-       if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-               return rec->ib.service_id;
-       else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-               return rec->opa.service_id;
-       return 0;
-}
-
 static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
 {
        if (rec->rec_type == SA_PATH_REC_TYPE_IB)
index 5852661443290d52eb8a3e719716452d752b6eaa..348c102cb5f6afdbe9f90a7c788431bac0219226 100644 (file)
@@ -10,9 +10,6 @@ struct ibnl_client_cbs {
        struct module *module;
 };
 
-int ibnl_init(void);
-void ibnl_cleanup(void);
-
 /**
  * Add a a client to the list of IB netlink exporters.
  * @index: Index of the added client
@@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
                        unsigned int group, gfp_t flags);
 
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
 #endif /* _RDMA_NETLINK_H */
index 275581d483ddd90d97c550ee8bf44d705833ecf8..5f17fb770477bbdfa2729a7b35cf21f70493515e 100644 (file)
@@ -557,6 +557,7 @@ struct iscsi_conn {
 #define LOGIN_FLAGS_READ_ACTIVE                1
 #define LOGIN_FLAGS_CLOSED             2
 #define LOGIN_FLAGS_READY              4
+#define LOGIN_FLAGS_INITIAL_PDU                8
        unsigned long           login_flags;
        struct delayed_work     login_work;
        struct delayed_work     login_cleanup_work;
index c3c9a0e1b3c9a474bd80b8cb10ea1049284474b0..8d4e85eae42c08481899e415075ee42c6d12f90f 100644 (file)
@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css)
 {
        lockdep_assert_held(&cgroup_mutex);
 
+       if (css->flags & CSS_DYING)
+               return;
+
+       css->flags |= CSS_DYING;
+
        /*
         * This must happen before css is disassociated with its cgroup.
         * See seq_css() for details.
index f6501f4f6040b5a9c21e84aeb57e20906ef1c614..ae643412948added94f05d7efb120631f7798b44 100644 (file)
@@ -176,9 +176,9 @@ typedef enum {
 } cpuset_flagbits_t;
 
 /* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
 {
-       return test_bit(CS_ONLINE, &cs->flags);
+       return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
 }
 
 static inline int is_cpu_exclusive(const struct cpuset *cs)
index 0450225579367eee10de6695679fffa7b963f013..ec4565122e6553f490dfb2434b6c14102b152bb2 100644 (file)
@@ -10,6 +10,7 @@ config LIVEPATCH
        depends on SYSFS
        depends on KALLSYMS_ALL
        depends on HAVE_LIVEPATCH
+       depends on !TRIM_UNUSED_KSYMS
        help
          Say Y here if you want to support kernel live patching.
          This option has no runtime impact until a kernel "patch"
index 78672d324a6ef95394ad72a0b0ba29c7d1155d5d..c7209f060eeb7c8672cf8f07ba9c83ac6c9460ac 100644 (file)
@@ -132,7 +132,7 @@ int freeze_processes(void)
        if (!pm_freezing)
                atomic_inc(&system_freezing_cnt);
 
-       pm_wakeup_clear(true);
+       pm_wakeup_clear();
        pr_info("Freezing user space processes ... ");
        pm_freezing = true;
        error = try_to_freeze_tasks(true);
index c0248c74d6d4cef6dbf09f485f36686862c29094..15e6baef5c73f90b6817c0b1c4e871ea40e30318 100644 (file)
@@ -72,8 +72,6 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
-       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
-
        spin_lock_irq(&suspend_freeze_lock);
        if (pm_wakeup_pending())
                goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
  out:
        suspend_freeze_state = FREEZE_STATE_NONE;
        spin_unlock_irq(&suspend_freeze_lock);
-
-       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
-}
-
-static void s2idle_loop(void)
-{
-       do {
-               freeze_enter();
-
-               if (freeze_ops && freeze_ops->wake)
-                       freeze_ops->wake();
-
-               dpm_resume_noirq(PMSG_RESUME);
-               if (freeze_ops && freeze_ops->sync)
-                       freeze_ops->sync();
-
-               if (pm_wakeup_pending())
-                       break;
-
-               pm_wakeup_clear(false);
-       } while (!dpm_suspend_noirq(PMSG_SUSPEND));
 }
 
 void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
         * all the devices are suspended.
         */
        if (state == PM_SUSPEND_FREEZE) {
-               s2idle_loop();
-               goto Platform_early_resume;
+               trace_suspend_resume(TPS("machine_suspend"), state, true);
+               freeze_enter();
+               trace_suspend_resume(TPS("machine_suspend"), state, false);
+               goto Platform_wake;
        }
 
        error = disable_nonboot_cpus();
index a1aecf44ab07c70ab9f33d455646559313926344..a1db38abac5b750e8ce228b441b27900360665ec 100644 (file)
@@ -269,7 +269,6 @@ static struct console *exclusive_console;
 #define MAX_CMDLINECONSOLES 8
 
 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
-static int console_cmdline_cnt;
 
 static int preferred_console = -1;
 int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
         *      See if this tty is not yet registered, and
         *      if we have a slot free.
         */
-       for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) {
+       for (i = 0, c = console_cmdline;
+            i < MAX_CMDLINECONSOLES && c->name[0];
+            i++, c++) {
                if (strcmp(c->name, name) == 0 && c->index == idx) {
-                       if (brl_options)
-                               return 0;
-
-                       /*
-                        * Maintain an invariant that will help to find if
-                        * the matching console is preferred, see
-                        * register_console():
-                        *
-                        * The last non-braille console is always
-                        * the preferred one.
-                        */
-                       if (i != console_cmdline_cnt - 1)
-                               swap(console_cmdline[i],
-                                    console_cmdline[console_cmdline_cnt - 1]);
-
-                       preferred_console = console_cmdline_cnt - 1;
-
+                       if (!brl_options)
+                               preferred_console = i;
                        return 0;
                }
        }
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
        braille_set_options(c, brl_options);
 
        c->index = idx;
-       console_cmdline_cnt++;
        return 0;
 }
 /*
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
        }
 
        /*
-        * See if this console matches one we selected on the command line.
-        *
-        * There may be several entries in the console_cmdline array matching
-        * with the same console, one with newcon->match(), another by
-        * name/index:
-        *
-        *      pl011,mmio,0x87e024000000,115200 -- added from SPCR
-        *      ttyAMA0 -- added from command line
-        *
-        * Traverse the console_cmdline array in reverse order to be
-        * sure that if this console is preferred then it will be the first
-        * matching entry.  We use the invariant that is maintained in
-        * __add_preferred_console().
+        *      See if this console matches one we selected on
+        *      the command line.
         */
-       for (i = console_cmdline_cnt - 1; i >= 0; i--) {
-               c = console_cmdline + i;
-
+       for (i = 0, c = console_cmdline;
+            i < MAX_CMDLINECONSOLES && c->name[0];
+            i++, c++) {
                if (!newcon->match ||
                    newcon->match(newcon, c->name, c->index, c->options) != 0) {
                        /* default matching */
index d9e6fddcc51f06a1286c56a24c510c1a3efa8add..b3c7214d710d5ea8bab8648b5182c53d882f3c31 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -407,12 +407,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 
        ret = handle_mm_fault(vma, address, fault_flags);
        if (ret & VM_FAULT_ERROR) {
-               if (ret & VM_FAULT_OOM)
-                       return -ENOMEM;
-               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-                       return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-                       return -EFAULT;
+               int err = vm_fault_to_errno(ret, *flags);
+
+               if (err)
+                       return err;
                BUG();
        }
 
@@ -723,12 +721,10 @@ retry:
        ret = handle_mm_fault(vma, address, fault_flags);
        major |= ret & VM_FAULT_MAJOR;
        if (ret & VM_FAULT_ERROR) {
-               if (ret & VM_FAULT_OOM)
-                       return -ENOMEM;
-               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-                       return -EHWPOISON;
-               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-                       return -EFAULT;
+               int err = vm_fault_to_errno(ret, 0);
+
+               if (err)
+                       return err;
                BUG();
        }
 
index e5828875f7bbd7a770d5c23334a0e3994ffe544f..3eedb187e5496f36f7f3186267f475254bcda5ab 100644 (file)
@@ -4170,6 +4170,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        }
                        ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
                        if (ret & VM_FAULT_ERROR) {
+                               int err = vm_fault_to_errno(ret, flags);
+
+                               if (err)
+                                       return err;
+
                                remainder = 0;
                                break;
                        }
index d9fc0e4561283d9a351f6dd7c4cfb74ad26ab566..216184af0e192b5405efc5a594129fa7c53ae953 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1028,8 +1028,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
                goto out;
 
        if (PageTransCompound(page)) {
-               err = split_huge_page(page);
-               if (err)
+               if (split_huge_page(page))
                        goto out_unlock;
        }
 
index b049c9b2dba8718a6f57777591c2f2753d8d40ad..7b8a5db76a2fec7331f09048f3c19ebdfddf9f16 100644 (file)
@@ -1739,6 +1739,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
        }
 }
 
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+       struct memblock_region *rgn;
+       unsigned long size = 0;
+       int idx;
+
+       for_each_memblock_type((&memblock.reserved), rgn) {
+               phys_addr_t start, end;
+
+               if (rgn->base + rgn->size < start_addr)
+                       continue;
+               if (rgn->base > end_addr)
+                       continue;
+
+               start = rgn->base;
+               end = start + rgn->size;
+               size += end - start;
+       }
+
+       return size;
+}
+
 void __init_memblock __memblock_dump_all(void)
 {
        pr_info("MEMBLOCK configuration:\n");
index 2527dfeddb003d245ac2e2bd964134030426f777..342fac9ba89b0da3e207b1fdaef2be71c9837a24 100644 (file)
@@ -1595,12 +1595,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
        if (ret) {
                pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
                        pfn, ret, page->flags, &page->flags);
-               /*
-                * We know that soft_offline_huge_page() tries to migrate
-                * only one hugepage pointed to by hpage, so we need not
-                * run through the pagelist here.
-                */
-               putback_active_hugepage(hpage);
+               if (!list_empty(&pagelist))
+                       putback_movable_pages(&pagelist);
                if (ret > 0)
                        ret = -EIO;
        } else {
index 6ff5d729ded0ecd3a5607d10248697a786091f7e..2e65df1831d941dcd1282c56312bdbd153df0a79 100644 (file)
@@ -3029,6 +3029,17 @@ static int __do_fault(struct vm_fault *vmf)
        return ret;
 }
 
+/*
+ * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
+ * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+       return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
 static int pte_alloc_one_map(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
@@ -3052,18 +3063,27 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
 map_pte:
        /*
         * If a huge pmd materialized under us just retry later.  Use
-        * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
-        * didn't become pmd_trans_huge under us and then back to pmd_none, as
-        * a result of MADV_DONTNEED running immediately after a huge pmd fault
-        * in a different thread of this mm, in turn leading to a misleading
-        * pmd_trans_huge() retval.  All we have to ensure is that it is a
-        * regular pmd that we can walk with pte_offset_map() and we can do that
-        * through an atomic read in C, which is what pmd_trans_unstable()
-        * provides.
+        * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
+        * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
+        * under us and then back to pmd_none, as a result of MADV_DONTNEED
+        * running immediately after a huge pmd fault in a different thread of
+        * this mm, in turn leading to a misleading pmd_trans_huge() retval.
+        * All we have to ensure is that it is a regular pmd that we can walk
+        * with pte_offset_map() and we can do that through an atomic read in
+        * C, which is what pmd_trans_unstable() provides.
         */
-       if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+       if (pmd_devmap_trans_unstable(vmf->pmd))
                return VM_FAULT_NOPAGE;
 
+       /*
+        * At this point we know that our vmf->pmd points to a page of ptes
+        * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
+        * for the duration of the fault.  If a racing MADV_DONTNEED runs and
+        * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
+        * be valid and we will re-check to make sure the vmf->pte isn't
+        * pte_none() under vmf->ptl protection when we return to
+        * alloc_set_pte().
+        */
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                        &vmf->ptl);
        return 0;
@@ -3690,7 +3710,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
                vmf->pte = NULL;
        } else {
                /* See comment in pte_alloc_one_map() */
-               if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+               if (pmd_devmap_trans_unstable(vmf->pmd))
                        return 0;
                /*
                 * A regular pmd is established and it can't morph into a huge
index c483c5c20b4bd12bcca50972c9f74a0dbd3a713e..b562b5523a6544e6c0ae6e4f792943441f6217a3 100644 (file)
@@ -284,7 +284,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
        int i;
        int nr = pagevec_count(pvec);
-       int delta_munlocked;
+       int delta_munlocked = -nr;
        struct pagevec pvec_putback;
        int pgrescued = 0;
 
@@ -304,6 +304,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                                continue;
                        else
                                __munlock_isolation_failed(page);
+               } else {
+                       delta_munlocked++;
                }
 
                /*
@@ -315,7 +317,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                pagevec_add(&pvec_putback, pvec->pages[i]);
                pvec->pages[i] = NULL;
        }
-       delta_munlocked = -nr + pagevec_count(&pvec_putback);
        __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
        spin_unlock_irq(zone_lru_lock(zone));
 
index f9e450c6b6e414d61b00d5a61be9cdea3b773e1b..2302f250d6b1ba150e3c2e4e17cfb6c99574ab5b 100644 (file)
@@ -292,6 +292,26 @@ int page_group_by_mobility_disabled __read_mostly;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+       unsigned long max_initialise;
+       unsigned long reserved_lowmem;
+
+       /*
+        * Initialise at least 2G of a node but also take into account that
+        * two large system hashes that can take up 1GB for 0.25TB/node.
+        */
+       max_initialise = max(2UL << (30 - PAGE_SHIFT),
+               (pgdat->node_spanned_pages >> 8));
+
+       /*
+        * Compensate the all the memblock reservations (e.g. crash kernel)
+        * from the initial estimation to make sure we will initialize enough
+        * memory to boot.
+        */
+       reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+                       pgdat->node_start_pfn + max_initialise);
+       max_initialise += reserved_lowmem;
+
+       pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
        pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -314,20 +334,11 @@ static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
 {
-       unsigned long max_initialise;
-
        /* Always populate low zones for address-contrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
-       /*
-        * Initialise at least 2G of a node but also take into account that
-        * two large system hashes that can take up 1GB for 0.25TB/node.
-        */
-       max_initialise = max(2UL << (30 - PAGE_SHIFT),
-               (pgdat->node_spanned_pages >> 8));
-
        (*nr_initialised)++;
-       if ((*nr_initialised > max_initialise) &&
+       if ((*nr_initialised > pgdat->static_init_size) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                pgdat->first_deferred_pfn = pfn;
                return false;
@@ -3870,7 +3881,9 @@ retry:
                goto got_pg;
 
        /* Avoid allocations with no watermarks from looping endlessly */
-       if (test_thread_flag(TIF_MEMDIE))
+       if (test_thread_flag(TIF_MEMDIE) &&
+           (alloc_flags == ALLOC_NO_WATERMARKS ||
+            (gfp_mask & __GFP_NOMEMALLOC)))
                goto nopage;
 
        /* Retry as long as the OOM killer is making progress */
@@ -6136,7 +6149,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        /* pg_data_t should be reset to zero when it's allocated */
        WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
 
-       reset_deferred_meminit(pgdat);
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
        pgdat->per_cpu_nodestats = NULL;
@@ -6158,6 +6170,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
                (unsigned long)pgdat->node_mem_map);
 #endif
 
+       reset_deferred_meminit(pgdat);
        free_area_init_core(pgdat);
 }
 
index 57e5156f02be6bcc23e70ec801e9cc1c3bbdd631..7449593fca724147cef5b8f7a46752333e5e0585 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5512,6 +5512,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                char mbuf[64];
                char *buf;
                struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+               ssize_t len;
 
                if (!attr || !attr->store || !attr->show)
                        continue;
@@ -5536,8 +5537,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                        buf = buffer;
                }
 
-               attr->show(root_cache, buf);
-               attr->store(s, buf, strlen(buf));
+               len = attr->show(root_cache, buf);
+               if (len > 0)
+                       attr->store(s, buf, len);
        }
 
        if (buffer)
index 464df34899031d46058b7cbadc1c3be24ae8dc89..26be6407abd7efe452a585d341a8d7e5b53d2b32 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -357,8 +357,11 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
        WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
 
        /*
-        * Make sure that larger requests are not too disruptive - no OOM
-        * killer and no allocation failure warnings as we have a fallback
+        * We want to attempt a large physically contiguous block first because
+        * it is less likely to fragment multiple larger blocks and therefore
+        * contribute to a long term fragmentation less than vmalloc fallback.
+        * However make sure that larger requests are not too disruptive - no
+        * OOM killer and no allocation failure warnings as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;
index 574f78824d8a2ae53751bbe1849e53502bc575be..32bd3ead9ba14a0b42bda3fc959f9134a8c9cc36 100644 (file)
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
                err = 0;
                switch (nla_type(attr)) {
                case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
-                       if (!(p->flags & BR_VLAN_TUNNEL))
+                       if (!p || !(p->flags & BR_VLAN_TUNNEL))
                                return -EINVAL;
                        err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
                        if (err)
index 0db8102995a506d64ece0de48b7266ccf3839ba8..6f12a5271219f071ed2d0bacb263561cb9e0f605 100644 (file)
@@ -179,7 +179,8 @@ static void br_stp_start(struct net_bridge *br)
                br_debug(br, "using kernel STP\n");
 
                /* To start timers on any ports left in blocking */
-               mod_timer(&br->hello_timer, jiffies + br->hello_time);
+               if (br->dev->flags & IFF_UP)
+                       mod_timer(&br->hello_timer, jiffies + br->hello_time);
                br_port_state_selection(br);
        }
 
index b0b87a292e7ccac2221a2425510b3c28e1df97f7..a0adfc31a3fe854cd52600f6b2924255aee521c2 100644 (file)
@@ -1680,8 +1680,10 @@ start_again:
 
        hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
                          &devlink_nl_family, NLM_F_MULTI, cmd);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (devlink_nl_put_handle(skb, devlink))
                goto nla_put_failure;
@@ -2098,8 +2100,10 @@ start_again:
 
        hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
                          &devlink_nl_family, NLM_F_MULTI, cmd);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (devlink_nl_put_handle(skb, devlink))
                goto nla_put_failure;
index 346d3e85dfbc2eca1ded0442ecb78d31e1768523..b1be7c01efe269d2bc97be7dcd1cc5485d29fa7b 100644 (file)
@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 
        spin_lock_irqsave(&q->lock, flags);
        skb = __skb_dequeue(q);
-       if (skb && (skb_next = skb_peek(q)))
+       if (skb && (skb_next = skb_peek(q))) {
                icmp_next = is_icmp_err_skb(skb_next);
+               if (icmp_next)
+                       sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+       }
        spin_unlock_irqrestore(&q->lock, flags);
 
        if (is_icmp_err_skb(skb) && !icmp_next)
index 26130ae438da53f3f99a4e9fa712a572f40ca779..90038d45a54764df55017b46258dc772b0af1c96 100644 (file)
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds)
+{
+       int i, ret = 0;
+
+       /* Suspend slave network devices */
+       for (i = 0; i < ds->num_ports; i++) {
+               if (!dsa_is_port_initialized(ds, i))
+                       continue;
+
+               ret = dsa_slave_suspend(ds->ports[i].netdev);
+               if (ret)
+                       return ret;
+       }
+
+       if (ds->ops->suspend)
+               ret = ds->ops->suspend(ds);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
+
+int dsa_switch_resume(struct dsa_switch *ds)
+{
+       int i, ret = 0;
+
+       if (ds->ops->resume)
+               ret = ds->ops->resume(ds);
+
+       if (ret)
+               return ret;
+
+       /* Resume slave network devices */
+       for (i = 0; i < ds->num_ports; i++) {
+               if (!dsa_is_port_initialized(ds, i))
+                       continue;
+
+               ret = dsa_slave_resume(ds->ports[i].netdev);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
+#endif
+
 static struct packet_type dsa_pack_type __read_mostly = {
        .type   = cpu_to_be16(ETH_P_XDSA),
        .func   = dsa_switch_rcv,
index 033b3bfb63dc1887b15b3e08f00a00f70b706ec4..7796580e99ee2c57cdd5503130c8ec2e121d879a 100644 (file)
@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
                dsa_ds_unapply(dst, ds);
        }
 
-       if (dst->cpu_switch)
+       if (dst->cpu_switch) {
                dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+               dst->cpu_switch = NULL;
+       }
 
        pr_info("DSA: tree %d unapplied\n", dst->tree);
        dst->applied = false;
index ad345c8b0b0693cc214b212ccbb402859d910134..7281098df04ecd597b7824a9c0d2ec1d7f60e2b9 100644 (file)
@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
        dsa_switch_unregister_notifier(ds);
 }
 
-#ifdef CONFIG_PM_SLEEP
-int dsa_switch_suspend(struct dsa_switch *ds)
-{
-       int i, ret = 0;
-
-       /* Suspend slave network devices */
-       for (i = 0; i < ds->num_ports; i++) {
-               if (!dsa_is_port_initialized(ds, i))
-                       continue;
-
-               ret = dsa_slave_suspend(ds->ports[i].netdev);
-               if (ret)
-                       return ret;
-       }
-
-       if (ds->ops->suspend)
-               ret = ds->ops->suspend(ds);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_suspend);
-
-int dsa_switch_resume(struct dsa_switch *ds)
-{
-       int i, ret = 0;
-
-       if (ds->ops->resume)
-               ret = ds->ops->resume(ds);
-
-       if (ret)
-               return ret;
-
-       /* Resume slave network devices */
-       for (i = 0; i < ds->num_ports; i++) {
-               if (!dsa_is_port_initialized(ds, i))
-                       continue;
-
-               ret = dsa_slave_resume(ds->ports[i].netdev);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_resume);
-#endif
-
 /* platform driver init and cleanup *****************************************/
 static int dev_is_class(struct device *dev, void *class)
 {
index f3dad16613437c0c7ac3e9c7518a0929cddb3ca7..58925b6597de83e7d643fb9b1c7e992c9748ae1c 100644 (file)
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
                .type =       SOCK_DGRAM,
                .protocol =   IPPROTO_ICMP,
                .prot =       &ping_prot,
-               .ops =        &inet_dgram_ops,
+               .ops =        &inet_sockraw_ops,
                .flags =      INET_PROTOSW_REUSE,
        },
 
index 59792d283ff8c19048904cb790dbeebef14da73d..b5ea036ca78144b86622cb0944d0b840f7225ec5 100644 (file)
@@ -2381,9 +2381,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
        return 0;
 }
 
-static int tcp_repair_options_est(struct tcp_sock *tp,
+static int tcp_repair_options_est(struct sock *sk,
                struct tcp_repair_opt __user *optbuf, unsigned int len)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_repair_opt opt;
 
        while (len >= sizeof(opt)) {
@@ -2396,6 +2397,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
                switch (opt.opt_code) {
                case TCPOPT_MSS:
                        tp->rx_opt.mss_clamp = opt.opt_val;
+                       tcp_mtup_init(sk);
                        break;
                case TCPOPT_WINDOW:
                        {
@@ -2555,7 +2557,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                if (!tp->repair)
                        err = -EINVAL;
                else if (sk->sk_state == TCP_ESTABLISHED)
-                       err = tcp_repair_options_est(tp,
+                       err = tcp_repair_options_est(sk,
                                        (struct tcp_repair_opt __user *)optval,
                                        optlen);
                else
index 6e3c512054a60715e8e2d16ffedd12cba6a3d2d9..324c9bcc5456b499b59cef40838b4e9829119e13 100644 (file)
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
+       tcp_sk(sk)->prior_ssthresh = 0;
        if (icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
        if (tcp_ca_needs_ecn(sk))
index 37ac9de713c69af30ae50d03e53ee472a7520b98..8d772fea1ddecd427a66c18f34d50f969186f02a 100644 (file)
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
        struct ipv6hdr *ip6_hdr;
        struct ipv6_opt_hdr *hop;
        unsigned char buf[CALIPSO_MAX_BUFFER];
-       int len_delta, new_end, pad;
+       int len_delta, new_end, pad, payload;
        unsigned int start, end;
 
        ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
        if (ret_val < 0)
                return ret_val;
 
+       ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
+
        if (len_delta) {
                if (len_delta > 0)
                        skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
                        sizeof(*ip6_hdr) + start);
                skb_reset_network_header(skb);
                ip6_hdr = ipv6_hdr(skb);
+               payload = ntohs(ip6_hdr->payload_len);
+               ip6_hdr->payload_len = htons(payload + len_delta);
        }
 
        hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
index 280268f1dd7b0972d7fadbcc9e28b043ceae423d..cdb3728faca7746d91e2430f6024f060a82b24fd 100644 (file)
@@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
                if (udpfrag) {
                        int err = ip6_find_1stfragopt(skb, &prevhdr);
-                       if (err < 0)
+                       if (err < 0) {
+                               kfree_skb_list(segs);
                                return ERR_PTR(err);
+                       }
                        fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
                        fptr->frag_off = htons(offset);
                        if (skb->next)
index 7ae6c503f1ca2b089388598bfceb43e4aa2d2fea..9b37f9747fc6a6fbabb0740188bc98b5c95c41c4 100644 (file)
@@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
        if (!dst) {
 route_lookup:
+               /* add dsfield to flowlabel for route lookup */
+               fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
                dst = ip6_route_output(net, NULL, fl6);
 
                if (dst->error)
index 9b522fa90e6d8f4a87ebed7cf574a36ceea89c61..ac826dd338ff0825eaf0d2d74cee92d008e018bb 100644 (file)
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
        .type =      SOCK_DGRAM,
        .protocol =  IPPROTO_ICMPV6,
        .prot =      &pingv6_prot,
-       .ops =       &inet6_dgram_ops,
+       .ops =       &inet6_sockraw_ops,
        .flags =     INET_PROTOSW_REUSE,
 };
 
index 1f992d9e261d8b75226659a4cead95f8dc04dc4f..60be012fe7085cc7a199e84333cef5ee95ed1f04 100644 (file)
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
 #endif /* CONFIG_PROC_FS */
 
 /* Same as inet6_dgram_ops, sans udp_poll.  */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
        .release           = inet6_release,
index 0e015906f9ca91e11d3e9e124c532c6a7cb04c5d..07d36573f50b9451e4c2bfee331ac2c023791a7a 100644 (file)
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
        iph = ipv6_hdr(skb);
 
        hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+       if (hdr_len < 0)
+               return hdr_len;
        skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
index 7a92c0f3191250118ce3572ca91ee9116460bce8..9ad07a91708ef7a1008d469766ab39b9b882883f 100644 (file)
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));
 
        hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+       if (hdr_len < 0)
+               return hdr_len;
        skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
index 60e2a62f7bef2fbb014f3a40403cf498a75d429c..cf2392b2ac717972e4354346fd086ec802370de3 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
        ieee80211_agg_start_txq(sta, tid, true);
 }
 
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+                             struct tid_ampdu_tx *tid_tx)
 {
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
-       struct sta_info *sta;
-       struct tid_ampdu_tx *tid_tx;
 
-       trace_api_start_tx_ba_cb(sdata, ra, tid);
+       if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+               return;
+
+       if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
+               ieee80211_agg_tx_operational(local, sta, tid);
+}
+
+static struct tid_ampdu_tx *
+ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
+                       const u8 *ra, u16 tid, struct sta_info **sta)
+{
+       struct tid_ampdu_tx *tid_tx;
 
        if (tid >= IEEE80211_NUM_TIDS) {
                ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
                       tid, IEEE80211_NUM_TIDS);
-               return;
+               return NULL;
        }
 
-       mutex_lock(&local->sta_mtx);
-       sta = sta_info_get_bss(sdata, ra);
-       if (!sta) {
-               mutex_unlock(&local->sta_mtx);
+       *sta = sta_info_get_bss(sdata, ra);
+       if (!*sta) {
                ht_dbg(sdata, "Could not find station: %pM\n", ra);
-               return;
+               return NULL;
        }
 
-       mutex_lock(&sta->ampdu_mlme.mtx);
-       tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+       tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
 
-       if (WARN_ON(!tid_tx)) {
+       if (WARN_ON(!tid_tx))
                ht_dbg(sdata, "addBA was not requested!\n");
-               goto unlock;
-       }
 
-       if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
-               goto unlock;
-
-       if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
-               ieee80211_agg_tx_operational(local, sta, tid);
-
- unlock:
-       mutex_unlock(&sta->ampdu_mlme.mtx);
-       mutex_unlock(&local->sta_mtx);
+       return tid_tx;
 }
 
 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_ra_tid *ra_tid;
-       struct sk_buff *skb = dev_alloc_skb(0);
+       struct sta_info *sta;
+       struct tid_ampdu_tx *tid_tx;
 
-       if (unlikely(!skb))
-               return;
+       trace_api_start_tx_ba_cb(sdata, ra, tid);
 
-       ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-       memcpy(&ra_tid->ra, ra, ETH_ALEN);
-       ra_tid->tid = tid;
+       rcu_read_lock();
+       tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+       if (!tid_tx)
+               goto out;
 
-       skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
-       skb_queue_tail(&sdata->skb_queue, skb);
-       ieee80211_queue_work(&local->hw, &sdata->work);
+       set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
+       ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
 
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+                            struct tid_ampdu_tx *tid_tx)
 {
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-       struct ieee80211_local *local = sdata->local;
-       struct sta_info *sta;
-       struct tid_ampdu_tx *tid_tx;
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
        bool send_delba = false;
 
-       trace_api_stop_tx_ba_cb(sdata, ra, tid);
-
-       if (tid >= IEEE80211_NUM_TIDS) {
-               ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-                      tid, IEEE80211_NUM_TIDS);
-               return;
-       }
-
-       ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
-
-       mutex_lock(&local->sta_mtx);
-
-       sta = sta_info_get_bss(sdata, ra);
-       if (!sta) {
-               ht_dbg(sdata, "Could not find station: %pM\n", ra);
-               goto unlock;
-       }
+       ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
+              sta->sta.addr, tid);
 
-       mutex_lock(&sta->ampdu_mlme.mtx);
        spin_lock_bh(&sta->lock);
-       tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-       if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+       if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
                ht_dbg(sdata,
                       "unexpected callback to A-MPDU stop for %pM tid %d\n",
                       sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
        spin_unlock_bh(&sta->lock);
 
        if (send_delba)
-               ieee80211_send_delba(sdata, ra, tid,
+               ieee80211_send_delba(sdata, sta->sta.addr, tid,
                        WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
-
-       mutex_unlock(&sta->ampdu_mlme.mtx);
- unlock:
-       mutex_unlock(&local->sta_mtx);
 }
 
 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_ra_tid *ra_tid;
-       struct sk_buff *skb = dev_alloc_skb(0);
+       struct sta_info *sta;
+       struct tid_ampdu_tx *tid_tx;
 
-       if (unlikely(!skb))
-               return;
+       trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
-       ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-       memcpy(&ra_tid->ra, ra, ETH_ALEN);
-       ra_tid->tid = tid;
+       rcu_read_lock();
+       tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+       if (!tid_tx)
+               goto out;
 
-       skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
-       skb_queue_tail(&sdata->skb_queue, skb);
-       ieee80211_queue_work(&local->hw, &sdata->work);
+       set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
+       ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
 
index f4a52877356349abed96d0a77d6ae75f301181e4..6ca5442b1e03b18aa81764089bf74c002d337690 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright 2017      Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 {
        int i;
 
-       cancel_work_sync(&sta->ampdu_mlme.work);
-
        for (i = 0; i <  IEEE80211_NUM_TIDS; i++) {
                __ieee80211_stop_tx_ba_session(sta, i, reason);
                __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
                                               reason != AGG_STOP_DESTROY_STA &&
                                               reason != AGG_STOP_PEER_REQUEST);
        }
+
+       /* stopping might queue the work again - so cancel only afterwards */
+       cancel_work_sync(&sta->ampdu_mlme.work);
 }
 
 void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
                spin_unlock_bh(&sta->lock);
 
                tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-               if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
-                                                &tid_tx->state))
+               if (!tid_tx)
+                       continue;
+
+               if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
+                       ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
+               if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
                        ___ieee80211_stop_tx_ba_session(sta, tid,
                                                        AGG_STOP_LOCAL_REQUEST);
+               if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+                       ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
        }
        mutex_unlock(&sta->ampdu_mlme.mtx);
 }
index f8f6c148f5545feeb78c16ca6f33ebf5e02d0ab5..665501ac358f8d83630f2727fe6249dcfeeb9689 100644 (file)
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
 
 enum sdata_queue_type {
        IEEE80211_SDATA_QUEUE_TYPE_FRAME        = 0,
-       IEEE80211_SDATA_QUEUE_AGG_START         = 1,
-       IEEE80211_SDATA_QUEUE_AGG_STOP          = 2,
        IEEE80211_SDATA_QUEUE_RX_AGG_START      = 3,
        IEEE80211_SDATA_QUEUE_RX_AGG_STOP       = 4,
 };
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
        return local->hw.wiphy->bands[band];
 }
 
-/* this struct represents 802.11n's RA/TID combination */
-struct ieee80211_ra_tid {
-       u8 ra[ETH_ALEN];
-       u16 tid;
-};
-
 /* this struct holds the value parsing from channel switch IE  */
 struct ieee80211_csa_ie {
        struct cfg80211_chan_def chandef;
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                   enum ieee80211_agg_stop_reason reason);
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                    enum ieee80211_agg_stop_reason reason);
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+                             struct tid_ampdu_tx *tid_tx);
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+                            struct tid_ampdu_tx *tid_tx);
 void ieee80211_ba_session_work(struct work_struct *work);
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
index 3bd5b81f5d81ec7d73686043c2683630e24ecde4..8fae1a72e6a7c7ea4f71ec3a3beb215b987a715f 100644 (file)
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
        struct sta_info *sta;
-       struct ieee80211_ra_tid *ra_tid;
        struct ieee80211_rx_agg *rx_agg;
 
        if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
        while ((skb = skb_dequeue(&sdata->skb_queue))) {
                struct ieee80211_mgmt *mgmt = (void *)skb->data;
 
-               if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
-                       ra_tid = (void *)&skb->cb;
-                       ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
-                                                ra_tid->tid);
-               } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
-                       ra_tid = (void *)&skb->cb;
-                       ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
-                                               ra_tid->tid);
-               } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
+               if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
                        rx_agg = (void *)&skb->cb;
                        mutex_lock(&local->sta_mtx);
                        sta = sta_info_get_bss(sdata, rx_agg->addr);
index 7cdf7a835bb01e8fade3b9d9bb6efaa19158f72b..403e3cc58b573dc511c8ba399ddefdc7e8e24ba0 100644 (file)
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                        struct ieee80211_sta_rx_stats *cpurxs;
 
                        cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
-                       sinfo->rx_packets += cpurxs->dropped;
+                       sinfo->rx_dropped_misc += cpurxs->dropped;
                }
        }
 
index 5609cacb20d5f31e02ba877f88a6f0290e18ce8b..ea0747d6a6da194fec9116dc14b58a543f1accf2 100644 (file)
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
 #define HT_AGG_STATE_STOPPING          3
 #define HT_AGG_STATE_WANT_START                4
 #define HT_AGG_STATE_WANT_STOP         5
+#define HT_AGG_STATE_START_CB          6
+#define HT_AGG_STATE_STOP_CB           7
 
 enum ieee80211_agg_stop_reason {
        AGG_STOP_DECLINED,
index 257ec66009da2dd7010d063c0ad29dbb9a32564e..7b05fd1497ceddea47c2c7917f5ee58f6ff2d560 100644 (file)
@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
                                continue;
                        alive++;
                        nh_flags &= ~flags;
-                       WRITE_ONCE(nh->nh_flags, flags);
+                       WRITE_ONCE(nh->nh_flags, nh_flags);
                } endfor_nexthops(rt);
 
                WRITE_ONCE(rt->rt_nhn_alive, alive);
index 9799a50bc604cc630494514cc80aa49edc2def0e..a8be9b72e6cd2ca34166bba49a532f4f92e86e9e 100644 (file)
@@ -890,8 +890,13 @@ restart:
        }
 out:
        local_bh_enable();
-       if (last)
+       if (last) {
+               /* nf ct hash resize happened, now clear the leftover. */
+               if ((struct nf_conn *)cb->args[1] == last)
+                       cb->args[1] = 0;
+
                nf_ct_put(last);
+       }
 
        while (i) {
                i--;
index 13875d599a85713bbfeb2fee7b8978fa498a10ed..1c5b14a6cab369591bd13e22b284a0737ad75c2e 100644 (file)
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
                      u8 pf, unsigned int hooknum)
 {
        const struct sctphdr *sh;
-       struct sctphdr _sctph;
        const char *logmsg;
 
-       sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
-       if (!sh) {
+       if (skb->len < dataoff + sizeof(struct sctphdr)) {
                logmsg = "nf_ct_sctp: short packet ";
                goto out_invalid;
        }
        if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
            skb->ip_summed == CHECKSUM_NONE) {
+               if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+                       logmsg = "nf_ct_sctp: failed to read header ";
+                       goto out_invalid;
+               }
+               sh = (const struct sctphdr *)(skb->data + dataoff);
                if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
                        logmsg = "nf_ct_sctp: bad CRC ";
                        goto out_invalid;
index ef0be325a0c6368bfe29ecda39db37dcb178a6d2..6c72922d20caee83f498cb02cdce0c46899a1c22 100644 (file)
@@ -566,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
         * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
         * will delete entry from already-freed table.
         */
-       ct->status &= ~IPS_NAT_DONE_MASK;
+       clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
        rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
                        nf_nat_bysource_params);
 
index e97e2fb53f0a107b0361322be10f16b4ab4b5d32..fbdbaa00dd5fd751f6ab8f428de987017b5bdf79 100644 (file)
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                else if (d > 0)
                        p = &parent->rb_right;
                else {
-                       if (nft_set_elem_active(&rbe->ext, genmask)) {
-                               if (nft_rbtree_interval_end(rbe) &&
-                                   !nft_rbtree_interval_end(new))
-                                       p = &parent->rb_left;
-                               else if (!nft_rbtree_interval_end(rbe) &&
-                                        nft_rbtree_interval_end(new))
-                                       p = &parent->rb_right;
-                               else {
-                                       *ext = &rbe->ext;
-                                       return -EEXIST;
-                               }
+                       if (nft_rbtree_interval_end(rbe) &&
+                           !nft_rbtree_interval_end(new)) {
+                               p = &parent->rb_left;
+                       } else if (!nft_rbtree_interval_end(rbe) &&
+                                  nft_rbtree_interval_end(new)) {
+                               p = &parent->rb_right;
+                       } else if (nft_set_elem_active(&rbe->ext, genmask)) {
+                               *ext = &rbe->ext;
+                               return -EEXIST;
+                       } else {
+                               p = &parent->rb_left;
                        }
                }
        }
index ee841f00a6ec715914fb14bb31dc8405f8dd4c4e..7586d446d7dcafc5c44b43190398840b68107d1f 100644 (file)
@@ -62,6 +62,7 @@
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
 #include <linux/genetlink.h>
+#include <linux/net_namespace.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
                goto out;
        }
        NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
-       NETLINK_CB(p->skb2).nsid_is_set = true;
+       if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
+               NETLINK_CB(p->skb2).nsid_is_set = true;
        val = netlink_broadcast_deliver(sk, p->skb2);
        if (val < 0) {
                netlink_overrun(sk);
index 24fedd4b117e8f61cf500f629f2b47f2bc53e76f..03f6b5840764dcc2c486015edd8dc7de4cd84b26 100644 (file)
@@ -119,11 +119,9 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 
        for (i = 0; i < (reqs << 1); i++) {
                rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
-               if (!rqst) {
-                       pr_err("RPC:       %s: Failed to create bc rpc_rqst\n",
-                              __func__);
+               if (!rqst)
                        goto out_free;
-               }
+
                dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);
 
                rqst->rq_xprt = &r_xprt->rx_xprt;
index 16aff8ddc16f8f3e66e31a86ce227b3ac49857bf..d5b54c020decdc2665d671f34d74dd809aa6682a 100644 (file)
@@ -2432,7 +2432,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
        case -ENETUNREACH:
        case -EADDRINUSE:
        case -ENOBUFS:
-               /* retry with existing socket, after a delay */
+               /*
+                * xs_tcp_force_close() wakes tasks with -EIO.
+                * We need to wake them first to ensure the
+                * correct error code.
+                */
+               xprt_wake_pending_tasks(xprt, status);
                xs_tcp_force_close(xprt);
                goto out;
        }
index f9b92ece78343a463a37d33e3f73fa8621b84444..5afd1098e33a173a18b2310aa24d205534df711c 100644 (file)
@@ -23,10 +23,11 @@ class LxDmesg(gdb.Command):
         super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
 
     def invoke(self, arg, from_tty):
-        log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
-        log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
-        log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
-        log_buf_len = int(gdb.parse_and_eval("log_buf_len"))
+        log_buf_addr = int(str(gdb.parse_and_eval(
+            "'printk.c'::log_buf")).split()[0], 16)
+        log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
+        log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
+        log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
 
         inf = gdb.inferiors()[0]
         start = log_buf_addr + log_first_idx
index 2f836ca09860e83995630de7be6546731523489a..cd67d1c12cf1ca9a32daa4de797dc0a5ec7bbb86 100644 (file)
@@ -1618,6 +1618,7 @@ static int snd_timer_user_tselect(struct file *file,
        if (err < 0)
                goto __err;
 
+       tu->qhead = tu->qtail = tu->qused = 0;
        kfree(tu->queue);
        tu->queue = NULL;
        kfree(tu->tqueue);
@@ -1959,6 +1960,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
        tu = file->private_data;
        unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
+       mutex_lock(&tu->ioctl_lock);
        spin_lock_irq(&tu->qlock);
        while ((long)count - result >= unit) {
                while (!tu->qused) {
@@ -1974,7 +1976,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                        add_wait_queue(&tu->qchange_sleep, &wait);
 
                        spin_unlock_irq(&tu->qlock);
+                       mutex_unlock(&tu->ioctl_lock);
                        schedule();
+                       mutex_lock(&tu->ioctl_lock);
                        spin_lock_irq(&tu->qlock);
 
                        remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1994,7 +1998,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                tu->qused--;
                spin_unlock_irq(&tu->qlock);
 
-               mutex_lock(&tu->ioctl_lock);
                if (tu->tread) {
                        if (copy_to_user(buffer, &tu->tqueue[qhead],
                                         sizeof(struct snd_timer_tread)))
@@ -2004,7 +2007,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                                         sizeof(struct snd_timer_read)))
                                err = -EFAULT;
                }
-               mutex_unlock(&tu->ioctl_lock);
 
                spin_lock_irq(&tu->qlock);
                if (err < 0)
@@ -2014,6 +2016,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
        }
  _error:
        spin_unlock_irq(&tu->qlock);
+       mutex_unlock(&tu->ioctl_lock);
        return result > 0 ? result : err;
 }
 
index 918e45268915de1c64e5b8b783ba423488e8f319..cbeebc0a9711e8283090762450525a84022db600 100644 (file)
@@ -2324,11 +2324,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
 
        SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
-       SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
-       SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
        SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+       SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
        SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+       SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -5854,7 +5854,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
@@ -5862,13 +5866,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+       SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
-       SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
index 7ae46c2647d453bcad1176b6877fcbbae110416b..b7ef8c59b49a2bdb2895f2203e0c207170227c6f 100644 (file)
@@ -301,6 +301,14 @@ static int atmel_classd_codec_probe(struct snd_soc_codec *codec)
        return 0;
 }
 
+static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
+{
+       struct snd_soc_card *card = snd_soc_codec_get_drvdata(codec);
+       struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
+
+       return regcache_sync(dd->regmap);
+}
+
 static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 {
        return dev_get_regmap(dev, NULL);
@@ -308,6 +316,7 @@ static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 
 static struct snd_soc_codec_driver soc_codec_dev_classd = {
        .probe          = atmel_classd_codec_probe,
+       .resume         = atmel_classd_codec_resume,
        .get_regmap     = atmel_classd_codec_get_remap,
        .component_driver = {
                .controls               = atmel_classd_snd_controls,
index 6dd7578f0bb8da118adfdb6da76e3247289bfc32..024d83fa6a7f78b81da8b55c8dbdec977871ef46 100644 (file)
@@ -772,7 +772,7 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
                                ++i;
                                msleep(50);
                        }
-               } while ((i < DA7213_SRM_CHECK_RETRIES) & (!srm_lock));
+               } while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));
 
                if (!srm_lock)
                        dev_warn(codec->dev, "SRM failed to lock\n");
index 9c365a7f758dbb9f1227c726f148b19672402f7c..7899a2cdeb42f46c5d76cd051319a4b547b1ed04 100644 (file)
@@ -1108,6 +1108,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
                }
        },
+       {
+               .ident = "Thinkpad Helix 2nd",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
+               }
+       },
 
        { }
 };
index 2c9dedab5184ff74909caf163f7d0697d8b67949..bc136d2bd7cdeb68b5ca7a24db94a193ede323b3 100644 (file)
@@ -202,7 +202,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
        if (ret < 0)
                return ret;
 
-       ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX);
+       ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
        if (ret < 0)
                return ret;
 
index 58c525096a7cbcd6ea4fd833d06e03a0127201fe..498b15345b1a657d608a3fcff773dae206e819b0 100644 (file)
@@ -413,8 +413,11 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
        u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
        u64 *ipc_header = (u64 *)(&header);
        struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
+       unsigned long flags;
 
+       spin_lock_irqsave(&ipc->dsp->spinlock, flags);
        msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
+       spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
        if (msg == NULL) {
                dev_dbg(ipc->dev, "ipc: rx list is empty\n");
                return;
@@ -456,8 +459,10 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
                }
        }
 
+       spin_lock_irqsave(&ipc->dsp->spinlock, flags);
        list_del(&msg->list);
        sst_ipc_tx_msg_reply_complete(ipc, msg);
+       spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 }
 
 irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
index 3a99712e44a80df81f7ad27d52e69501f617e948..64a0f8ed33e135eb5c0af683624afa3ebdb616b9 100644 (file)
@@ -2502,7 +2502,7 @@ static int skl_tplg_get_manifest_tkn(struct device *dev,
 
                        if (ret < 0)
                                return ret;
-                       tkn_count += ret;
+                       tkn_count = ret;
 
                        tuple_size += tkn_count *
                                sizeof(struct snd_soc_tplg_vendor_string_elem);
index 6df3b317a4768e008b539f0a619308e9545a3992..4c9b5781282bb149e8d749b32f2a1bd5c2987338 100644 (file)
@@ -410,7 +410,7 @@ static int skl_free(struct hdac_ext_bus *ebus)
        struct skl *skl  = ebus_to_skl(ebus);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
 
-       skl->init_failed = 1; /* to be sure */
+       skl->init_done = 0; /* to be sure */
 
        snd_hdac_ext_stop_streams(ebus);
 
@@ -428,8 +428,10 @@ static int skl_free(struct hdac_ext_bus *ebus)
 
        snd_hdac_ext_bus_exit(ebus);
 
+       cancel_work_sync(&skl->probe_work);
        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
                snd_hdac_i915_exit(&ebus->bus);
+
        return 0;
 }
 
@@ -566,6 +568,84 @@ static const struct hdac_bus_ops bus_core_ops = {
        .get_response = snd_hdac_bus_get_response,
 };
 
+static int skl_i915_init(struct hdac_bus *bus)
+{
+       int err;
+
+       /*
+        * The HDMI codec is in GPU so we need to ensure that it is powered
+        * up and ready for probe
+        */
+       err = snd_hdac_i915_init(bus);
+       if (err < 0)
+               return err;
+
+       err = snd_hdac_display_power(bus, true);
+       if (err < 0)
+               dev_err(bus->dev, "Cannot turn on display power on i915\n");
+
+       return err;
+}
+
+static void skl_probe_work(struct work_struct *work)
+{
+       struct skl *skl = container_of(work, struct skl, probe_work);
+       struct hdac_ext_bus *ebus = &skl->ebus;
+       struct hdac_bus *bus = ebus_to_hbus(ebus);
+       struct hdac_ext_link *hlink = NULL;
+       int err;
+
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+               err = skl_i915_init(bus);
+               if (err < 0)
+                       return;
+       }
+
+       err = skl_init_chip(bus, true);
+       if (err < 0) {
+               dev_err(bus->dev, "Init chip failed with err: %d\n", err);
+               goto out_err;
+       }
+
+       /* codec detection */
+       if (!bus->codec_mask)
+               dev_info(bus->dev, "no hda codecs found!\n");
+
+       /* create codec instances */
+       err = skl_codec_create(ebus);
+       if (err < 0)
+               goto out_err;
+
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+               err = snd_hdac_display_power(bus, false);
+               if (err < 0) {
+                       dev_err(bus->dev, "Cannot turn off display power on i915\n");
+                       return;
+               }
+       }
+
+       /* register platform dai and controls */
+       err = skl_platform_register(bus->dev);
+       if (err < 0)
+               return;
+       /*
+        * we are done probing so decrement link counts
+        */
+       list_for_each_entry(hlink, &ebus->hlink_list, list)
+               snd_hdac_ext_bus_link_put(ebus, hlink);
+
+       /* configure PM */
+       pm_runtime_put_noidle(bus->dev);
+       pm_runtime_allow(bus->dev);
+       skl->init_done = 1;
+
+       return;
+
+out_err:
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+               err = snd_hdac_display_power(bus, false);
+}
+
 /*
  * constructor
  */
@@ -593,6 +673,7 @@ static int skl_create(struct pci_dev *pci,
        snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
        ebus->bus.use_posbuf = 1;
        skl->pci = pci;
+       INIT_WORK(&skl->probe_work, skl_probe_work);
 
        ebus->bus.bdl_pos_adj = 0;
 
@@ -601,27 +682,6 @@ static int skl_create(struct pci_dev *pci,
        return 0;
 }
 
-static int skl_i915_init(struct hdac_bus *bus)
-{
-       int err;
-
-       /*
-        * The HDMI codec is in GPU so we need to ensure that it is powered
-        * up and ready for probe
-        */
-       err = snd_hdac_i915_init(bus);
-       if (err < 0)
-               return err;
-
-       err = snd_hdac_display_power(bus, true);
-       if (err < 0) {
-               dev_err(bus->dev, "Cannot turn on display power on i915\n");
-               return err;
-       }
-
-       return err;
-}
-
 static int skl_first_init(struct hdac_ext_bus *ebus)
 {
        struct skl *skl = ebus_to_skl(ebus);
@@ -684,20 +744,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
        /* initialize chip */
        skl_init_pci(skl);
 
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
-               err = skl_i915_init(bus);
-               if (err < 0)
-                       return err;
-       }
-
-       skl_init_chip(bus, true);
-
-       /* codec detection */
-       if (!bus->codec_mask) {
-               dev_info(bus->dev, "no hda codecs found!\n");
-       }
-
-       return 0;
+       return skl_init_chip(bus, true);
 }
 
 static int skl_probe(struct pci_dev *pci,
@@ -706,7 +753,6 @@ static int skl_probe(struct pci_dev *pci,
        struct skl *skl;
        struct hdac_ext_bus *ebus = NULL;
        struct hdac_bus *bus = NULL;
-       struct hdac_ext_link *hlink = NULL;
        int err;
 
        /* we use ext core ops, so provide NULL for ops here */
@@ -729,7 +775,7 @@ static int skl_probe(struct pci_dev *pci,
 
        if (skl->nhlt == NULL) {
                err = -ENODEV;
-               goto out_display_power_off;
+               goto out_free;
        }
 
        err = skl_nhlt_create_sysfs(skl);
@@ -760,56 +806,24 @@ static int skl_probe(struct pci_dev *pci,
        if (bus->mlcap)
                snd_hdac_ext_bus_get_ml_capabilities(ebus);
 
+       snd_hdac_bus_stop_chip(bus);
+
        /* create device for soc dmic */
        err = skl_dmic_device_register(skl);
        if (err < 0)
                goto out_dsp_free;
 
-       /* register platform dai and controls */
-       err = skl_platform_register(bus->dev);
-       if (err < 0)
-               goto out_dmic_free;
-
-       /* create codec instances */
-       err = skl_codec_create(ebus);
-       if (err < 0)
-               goto out_unregister;
-
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
-               err = snd_hdac_display_power(bus, false);
-               if (err < 0) {
-                       dev_err(bus->dev, "Cannot turn off display power on i915\n");
-                       return err;
-               }
-       }
-
-       /*
-        * we are done probling so decrement link counts
-        */
-       list_for_each_entry(hlink, &ebus->hlink_list, list)
-               snd_hdac_ext_bus_link_put(ebus, hlink);
-
-       /* configure PM */
-       pm_runtime_put_noidle(bus->dev);
-       pm_runtime_allow(bus->dev);
+       schedule_work(&skl->probe_work);
 
        return 0;
 
-out_unregister:
-       skl_platform_unregister(bus->dev);
-out_dmic_free:
-       skl_dmic_device_unregister(skl);
 out_dsp_free:
        skl_free_dsp(skl);
 out_mach_free:
        skl_machine_device_unregister(skl);
 out_nhlt_free:
        skl_nhlt_free(skl->nhlt);
-out_display_power_off:
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
-               snd_hdac_display_power(bus, false);
 out_free:
-       skl->init_failed = 1;
        skl_free(ebus);
 
        return err;
@@ -828,7 +842,7 @@ static void skl_shutdown(struct pci_dev *pci)
 
        skl = ebus_to_skl(ebus);
 
-       if (skl->init_failed)
+       if (!skl->init_done)
                return;
 
        snd_hdac_ext_stop_streams(ebus);
index a454f6035f3e64b3be01ea4c53153c141e3a9561..2a630fcb7f088c1d548f06de31933ca662eafca2 100644 (file)
@@ -46,7 +46,7 @@ struct skl {
        struct hdac_ext_bus ebus;
        struct pci_dev *pci;
 
-       unsigned int init_failed:1; /* delayed init failed */
+       unsigned int init_done:1; /* delayed init status */
        struct platform_device *dmic_dev;
        struct platform_device *i2s_dev;
        struct snd_soc_platform *platform;
@@ -64,6 +64,8 @@ struct skl {
        const struct firmware *tplg;
 
        int supend_active;
+
+       struct work_struct probe_work;
 };
 
 #define skl_to_ebus(s) (&(s)->ebus)
index 66203d107a11e5ff17e150adb145b15a0d27934e..d3b0dc145a560c8a35ddc4320810039f481db5aa 100644 (file)
@@ -507,7 +507,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                                rbga = rbgx;
                                adg->rbga_rate_for_441khz = rate / div;
                                ckr |= brg_table[i] << 20;
-                               if (req_441kHz_rate)
+                               if (req_441kHz_rate &&
+                                   !(adg_mode_flags(adg) & AUDIO_OUT_48))
                                        parent_clk_name = __clk_get_name(clk);
                        }
                }
@@ -522,7 +523,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                                rbgb = rbgx;
                                adg->rbgb_rate_for_48khz = rate / div;
                                ckr |= brg_table[i] << 16;
-                               if (req_48kHz_rate)
+                               if (req_48kHz_rate &&
+                                   (adg_mode_flags(adg) & AUDIO_OUT_48))
                                        parent_clk_name = __clk_get_name(clk);
                        }
                }
index 7d92a24b7cfa558afbb8331401c974c59d5f1ae5..d879c010cf03c4607ebdab3c854a582d816ddf62 100644 (file)
@@ -89,6 +89,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
        dev_dbg(dev, "ctu/mix path = 0x%08x", data);
 
        rsnd_mod_write(mod, CMD_ROUTE_SLCT, data);
+       rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1);
        rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
 
        rsnd_adg_set_cmd_timsel_gen2(mod, io);
index 1744015408c38f2ad530fbcbae9a22027f5f0828..8c1f4e2e0c4fb8c3ac09a641b4b8a928defb871c 100644 (file)
@@ -343,6 +343,57 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
                return 0x76543210;
 }
 
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
+{
+       enum rsnd_mod_type playback_mods[] = {
+               RSND_MOD_SRC,
+               RSND_MOD_CMD,
+               RSND_MOD_SSIU,
+       };
+       enum rsnd_mod_type capture_mods[] = {
+               RSND_MOD_CMD,
+               RSND_MOD_SRC,
+               RSND_MOD_SSIU,
+       };
+       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       struct rsnd_mod *tmod = NULL;
+       enum rsnd_mod_type *mods =
+               rsnd_io_is_play(io) ?
+               playback_mods : capture_mods;
+       int i;
+
+       /*
+        * This is needed for 24bit data
+        * We need to shift 8bit
+        *
+        * Linux 24bit data is located as 0x00******
+        * HW    24bit data is located as 0x******00
+        *
+        */
+       switch (runtime->sample_bits) {
+       case 16:
+               return 0;
+       case 32:
+               break;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
+               tmod = rsnd_io_to_mod(io, mods[i]);
+               if (tmod)
+                       break;
+       }
+
+       if (tmod != mod)
+               return 0;
+
+       if (rsnd_io_is_play(io))
+               return  (0 << 20) | /* shift to Left */
+                       (8 << 16);  /* 8bit */
+       else
+               return  (1 << 20) | /* shift to Right */
+                       (8 << 16);  /* 8bit */
+}
+
 /*
  *     rsnd_dai functions
  */
index 63b6d3c28021024b1f06278c5c4f217a394faf8c..4b0980728e13ec75f18ac07135ab4971290b5310 100644 (file)
@@ -236,6 +236,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
                RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc,    0x20),
                RSND_GEN_M_REG(SRC_CTRL,        0x10,   0x20),
                RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18,   0x20),
+               RSND_GEN_M_REG(CMD_BUSIF_MODE,  0x184,  0x20),
                RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188,  0x20),
                RSND_GEN_M_REG(CMD_ROUTE_SLCT,  0x18c,  0x20),
                RSND_GEN_M_REG(CMD_CTRL,        0x190,  0x20),
index dbf4163427e808d62dbc37aa62f64b482e65e7c4..323af41ecfcb8ffea222f25fbe6c524968f29fd5 100644 (file)
@@ -73,6 +73,7 @@ enum rsnd_reg {
        RSND_REG_SCU_SYS_INT_EN0,
        RSND_REG_SCU_SYS_INT_EN1,
        RSND_REG_CMD_CTRL,
+       RSND_REG_CMD_BUSIF_MODE,
        RSND_REG_CMD_BUSIF_DALIGN,
        RSND_REG_CMD_ROUTE_SLCT,
        RSND_REG_CMDOUT_TIMSEL,
@@ -204,6 +205,7 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
                    u32 mask, u32 data);
 u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
 u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
 
 /*
  *     R-Car DMA
index 20b5b2ec625ea7b1e1812ea83d07d35b48b948ea..76a477a3ccb5d88e18fd8398d9ad2b2616a99f48 100644 (file)
@@ -190,11 +190,13 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       int is_play = rsnd_io_is_play(io);
        int use_src = 0;
        u32 fin, fout;
        u32 ifscr, fsrate, adinr;
        u32 cr, route;
        u32 bsdsr, bsisr;
+       u32 i_busif, o_busif, tmp;
        uint ratio;
 
        if (!runtime)
@@ -270,6 +272,11 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
                break;
        }
 
+       /* BUSIF_MODE */
+       tmp = rsnd_get_busif_shift(io, mod);
+       i_busif = ( is_play ? tmp : 0) | 1;
+       o_busif = (!is_play ? tmp : 0) | 1;
+
        rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
 
        rsnd_mod_write(mod, SRC_SRCIR, 1);      /* initialize */
@@ -281,8 +288,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        rsnd_mod_write(mod, SRC_BSISR, bsisr);
        rsnd_mod_write(mod, SRC_SRCIR, 0);      /* cancel initialize */
 
-       rsnd_mod_write(mod, SRC_I_BUSIF_MODE, 1);
-       rsnd_mod_write(mod, SRC_O_BUSIF_MODE, 1);
+       rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif);
+       rsnd_mod_write(mod, SRC_O_BUSIF_MODE, o_busif);
+
        rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
 
        rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
index 135c5669f7963bd228c9a1187bc6f5dfd13bc04e..91e5c07911b4a5b14364becf64c568d7a61cc1c4 100644 (file)
@@ -302,7 +302,7 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
         * always use 32bit system word.
         * see also rsnd_ssi_master_clk_enable()
         */
-       cr_own = FORCE | SWL_32 | PDTA;
+       cr_own = FORCE | SWL_32;
 
        if (rdai->bit_clk_inv)
                cr_own |= SCKP;
@@ -550,6 +550,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
                struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
                u32 *buf = (u32 *)(runtime->dma_area +
                                   rsnd_dai_pointer_offset(io, 0));
+               int shift = 0;
+
+               switch (runtime->sample_bits) {
+               case 32:
+                       shift = 8;
+                       break;
+               }
 
                /*
                 * 8/16/32 data can be assesse to TDR/RDR register
@@ -557,9 +564,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
                 * see rsnd_ssi_init()
                 */
                if (rsnd_io_is_play(io))
-                       rsnd_mod_write(mod, SSITDR, *buf);
+                       rsnd_mod_write(mod, SSITDR, (*buf) << shift);
                else
-                       *buf = rsnd_mod_read(mod, SSIRDR);
+                       *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
 
                elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
        }
@@ -709,6 +716,11 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
                               struct rsnd_priv *priv)
 {
        struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
+
+       /* Do nothing for SSI parent mod */
+       if (ssi_parent_mod == mod)
+               return 0;
 
        /* PIO will request IRQ again */
        free_irq(ssi->irq, mod);
index 14fafdaf1395f9737191df18599ee58fc4f858fd..512d238b79e2895f13a4b1b7be3e145859a65280 100644 (file)
@@ -144,7 +144,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
                               (rsnd_io_is_play(io) ?
                                rsnd_runtime_channel_after_ctu(io) :
                                rsnd_runtime_channel_original(io)));
-               rsnd_mod_write(mod, SSI_BUSIF_MODE,  1);
+               rsnd_mod_write(mod, SSI_BUSIF_MODE,
+                              rsnd_get_busif_shift(io, mod) | 1);
                rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
                               rsnd_get_dalign(mod, io));
        }
index aae099c0e50280d67f153d6769ac4237d531e169..754e3ef8d7ae1b8b188c3e52986f2c306fb7b763 100644 (file)
@@ -2286,6 +2286,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        list_for_each_entry(rtd, &card->rtd_list, list)
                flush_delayed_work(&rtd->delayed_work);
 
+       /* free the ALSA card at first; this syncs with pending operations */
+       snd_card_free(card->snd_card);
+
        /* remove and free each DAI */
        soc_remove_dai_links(card);
        soc_remove_pcm_runtimes(card);
@@ -2300,9 +2303,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        if (card->remove)
                card->remove(card);
 
-       snd_card_free(card->snd_card);
        return 0;
-
 }
 
 /* removes a socdev */
index dc48eedea92e7aaaba64f4db4053fea1b73d0b77..26ed23b18b7774fd495f7ce90e51ec6dc8022e2d 100644 (file)
@@ -698,16 +698,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
        struct snd_usb_audio *chip = elem->head.mixer->chip;
        struct snd_us16x08_meter_store *store = elem->private_data;
        u8 meter_urb[64];
-       char tmp[sizeof(mix_init_msg2)] = {0};
 
        switch (kcontrol->private_value) {
-       case 0:
-               snd_us16x08_send_urb(chip, (char *)mix_init_msg1,
-                                    sizeof(mix_init_msg1));
+       case 0: {
+               char tmp[sizeof(mix_init_msg1)];
+
+               memcpy(tmp, mix_init_msg1, sizeof(mix_init_msg1));
+               snd_us16x08_send_urb(chip, tmp, 4);
                snd_us16x08_recv_urb(chip, meter_urb,
                        sizeof(meter_urb));
                kcontrol->private_value++;
                break;
+       }
        case 1:
                snd_us16x08_recv_urb(chip, meter_urb,
                        sizeof(meter_urb));
@@ -718,15 +720,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
                        sizeof(meter_urb));
                kcontrol->private_value++;
                break;
-       case 3:
+       case 3: {
+               char tmp[sizeof(mix_init_msg2)];
+
                memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2));
                tmp[2] = snd_get_meter_comp_index(store);
-               snd_us16x08_send_urb(chip, tmp, sizeof(mix_init_msg2));
+               snd_us16x08_send_urb(chip, tmp, 10);
                snd_us16x08_recv_urb(chip, meter_urb,
                        sizeof(meter_urb));
                kcontrol->private_value = 0;
                break;
        }
+       }
 
        for (set = 0; set < 6; set++)
                get_meter_levels_from_urb(set, store, meter_urb);
@@ -1135,7 +1140,7 @@ static const struct snd_us16x08_control_params eq_controls[] = {
                .control_id = SND_US16X08_ID_EQLOWMIDWIDTH,
                .type = USB_MIXER_U8,
                .num_channels = 16,
-               .name = "EQ MidQLow Q",
+               .name = "EQ MidLow Q",
        },
        { /* EQ mid high gain */
                .kcontrol_new = &snd_us16x08_eq_gain_ctl,
index c0c48507e44e2992b3624b2374cd08769a8bb607..ad0543e21760562d94bc0a44676700b1179c1787 100644 (file)
@@ -220,6 +220,7 @@ config INITRAMFS_COMPRESSION_LZ4
 endchoice
 
 config INITRAMFS_COMPRESSION
+       depends on INITRAMFS_SOURCE!=""
        string
        default ""      if INITRAMFS_COMPRESSION_NONE
        default ".gz"   if INITRAMFS_COMPRESSION_GZIP