Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 20 Aug 2017 16:36:52 +0000 (09:36 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 20 Aug 2017 16:36:52 +0000 (09:36 -0700)
Pull x86 fixes from Thomas Gleixner:
 "Another pile of small fixes and updates for x86:

   - Plug a hole in the SMAP implementation that failed to clear AC on
     NMI entry

   - Fix the norandmaps/ADDR_NO_RANDOMIZE logic so the command line
     parameter works correctly again

   - Use the proper accessor in the startup64 code for next_early_pgt to
     prevent accesses to invalid addresses and faults in the early boot
     code (see the sketch after the shortlog below)

   - Prevent CPU hotplug lock recursion in the MTRR code

   - Unbreak CPU0 hotplugging

   - Rename the overly long CPUID bits introduced in this cycle

   - Two commits which mark data 'const' and restrict the scope of data
     and functions to file scope by making them 'static'"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Constify attribute_group structures
  x86/boot/64/clang: Use fixup_pointer() to access 'next_early_pgt'
  x86/elf: Remove the unnecessary ADDR_NO_RANDOMIZE checks
  x86: Fix norandmaps/ADDR_NO_RANDOMIZE
  x86/mtrr: Prevent CPU hotplug lock recursion
  x86: Mark various structures and functions as 'static'
  x86/cpufeature, kvm/svm: Rename (shorten) the new "virtualized VMSAVE/VMLOAD" CPUID flag
  x86/smpboot: Unbreak CPU0 hotplug
  x86/asm/64: Clear AC on NMI entries
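
For context on the next_early_pgt fix above: very early in 64-bit boot the
kernel still executes at its identity-mapped physical address, so an absolute
reference the compiler emits for a global can point at a virtual address that
is not mapped yet. A minimal sketch of the fixup_pointer() idiom used for
this (simplified from arch/x86/kernel/head64.c, not the verbatim patch):

	/* Translate a link-time (virtual) pointer into the physical
	 * location the kernel is currently running from. */
	static void *fixup_pointer(void *ptr, unsigned long physaddr)
	{
		return ptr - (void *)_text + (void *)physaddr;
	}

	/* Reach the global through the fixed-up pointer instead of an
	 * absolute reference the early code cannot use yet: */
	unsigned int *next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
	pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);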

350 files changed:
Documentation/fb/efifb.txt
Documentation/printk-formats.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
arch/arm/boot/dts/imx7d-sdb.dts
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/include/asm/tlb.h
arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
arch/arm64/boot/dts/renesas/salvator-common.dtsi
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/elf.h
arch/ia64/include/asm/tlb.h
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/boot/compressed/.gitignore [new file with mode: 0644]
arch/mips/cavium-octeon/octeon-usb.c
arch/mips/dec/int-handler.S
arch/mips/include/asm/cache.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/octeon/cvmx-l2c-defs.h
arch/mips/include/asm/octeon/cvmx-l2d-defs.h [new file with mode: 0644]
arch/mips/include/asm/octeon/cvmx.h
arch/mips/kernel/smp.c
arch/mips/mm/uasm-mips.c
arch/mips/pci/pci.c
arch/mips/vdso/gettimeofday.c
arch/powerpc/Kconfig
arch/powerpc/configs/powernv_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/watchdog.c
arch/powerpc/platforms/powernv/idle.c
arch/s390/include/asm/tlb.h
arch/sh/include/asm/tlb.h
arch/sparc/include/asm/spitfire.h
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/head_64.S
arch/sparc/kernel/setup_64.c
arch/sparc/mm/init_64.c
arch/um/include/asm/tlb.h
arch/x86/Kconfig
arch/x86/crypto/sha1_avx2_x86_64_asm.S
arch/x86/crypto/sha1_ssse3_glue.c
arch/x86/events/core.c
arch/x86/include/asm/elf.h
arch/x86/include/asm/hypervisor.h
arch/x86/kernel/cpu/aperfmperf.c
arch/x86/mm/init.c
arch/x86/xen/enlighten_hvm.c
block/bio-integrity.c
block/blk-mq-pci.c
block/blk-mq.c
drivers/acpi/spcr.c
drivers/base/firmware_class.c
drivers/block/sunvdc.c
drivers/block/xen-blkfront.c
drivers/block/zram/zram_drv.c
drivers/clocksource/Kconfig
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/em_sti.c
drivers/clocksource/timer-of.c
drivers/cpufreq/intel_pstate.c
drivers/cpuidle/cpuidle-powernv.c
drivers/crypto/ixp4xx_crypto.c
drivers/dma-buf/sync_file.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/stm/Kconfig
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/accel/st_accel_core.c
drivers/iio/adc/aspeed_adc.c
drivers/iio/adc/axp288_adc.c
drivers/iio/adc/sun4i-gpadc-iio.c
drivers/iio/adc/vf610_adc.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/light/tsl2563.c
drivers/iio/pressure/st_pressure_core.c
drivers/infiniband/core/device.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/hns/hns_roce_ah.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_d.h
drivers/infiniband/hw/i40iw/i40iw_puda.c
drivers/infiniband/hw/i40iw/i40iw_status.h
drivers/infiniband/hw/i40iw/i40iw_uk.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/trackpoint.c
drivers/iommu/arm-smmu.c
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-atmel-aic-common.h
drivers/irqchip/irq-atmel-aic.c
drivers/irqchip/irq-atmel-aic5.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/irqchip/irq-gic-v3-its-platform-msi.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/isdn/mISDN/fsm.c
drivers/isdn/mISDN/fsm.h
drivers/isdn/mISDN/layer1.c
drivers/isdn/mISDN/layer2.c
drivers/isdn/mISDN/tei.c
drivers/md/md.c
drivers/md/raid5-cache.c
drivers/misc/mei/pci-me.c
drivers/misc/mei/pci-txe.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mtd/mtd_blkdevs.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/geneve.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/pci.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/fc.c
drivers/of/device.c
drivers/parisc/dino.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_nvmet.h
drivers/scsi/qla2xxx/qla_tmpl.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/ses.c
drivers/scsi/st.c
drivers/soc/imx/gpcv2.c
drivers/soc/ti/ti_sci_pm_domains.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/iio/resolver/ad2s1210.c
drivers/target/iscsi/cxgbit/cxgbit_cm.c
drivers/target/iscsi/cxgbit/cxgbit_target.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/thunderbolt/eeprom.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/amba-pl011.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/pci-quirks.h
drivers/usb/host/xhci-pci.c
drivers/usb/musb/musb_host.c
drivers/usb/phy/phy-msm-usb.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/rcar3.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/storage/unusual_uas.h
drivers/usb/storage/usb.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/imxfb.c
drivers/video/fbdev/omap2/omapfb/dss/core.c
drivers/xen/biomerge.c
drivers/xen/events/events_base.c
drivers/xen/xenbus/xenbus_xs.c
fs/devpts/inode.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/iomap.c
fs/nfs/Kconfig
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/nfs4proc.c
fs/proc/meminfo.c
fs/proc/task_mmu.c
fs/quota/dquot.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/xfs_log.c
fs/xfs/xfs_mount.c
include/asm-generic/tlb.h
include/linux/acpi.h
include/linux/devpts_fs.h
include/linux/iio/common/st_sensors.h
include/linux/memblock.h
include/linux/memcontrol.h
include/linux/mm_types.h
include/linux/net.h
include/linux/nmi.h
include/linux/nvme-fc-driver.h
include/linux/oom.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/platform_data/st_sensors_pdata.h
include/linux/sync_file.h
include/linux/wait.h
include/net/addrconf.h
include/net/bonding.h
include/net/busy_poll.h
include/net/mac80211.h
include/net/udp.h
include/target/iscsi/iscsi_target_core.h
include/uapi/drm/msm_drm.h
kernel/audit_watch.c
kernel/events/core.c
kernel/fork.c
kernel/irq/chip.c
kernel/irq/ipi.c
kernel/kmod.c
kernel/power/snapshot.c
kernel/signal.c
kernel/trace/bpf_trace.c
kernel/watchdog.c
kernel/watchdog_hld.c
lib/Kconfig.debug
lib/fault-inject.c
lib/test_kmod.c
mm/balloon_compaction.c
mm/cma_debug.c
mm/debug.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mprotect.c
mm/nobootmem.c
mm/page-writeback.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slub.c
mm/util.c
mm/vmalloc.c
net/core/filter.c
net/dccp/proto.c
net/dsa/tag_ksz.c
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/igmp.c
net/ipv4/ip_output.c
net/ipv4/route.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_ulp.c
net/ipv4/udp.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/key/af_key.c
net/mac80211/agg-rx.c
net/packet/af_packet.c
net/sched/act_ipt.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_sfq.c
net/tipc/bearer.c
net/tipc/msg.c
net/tipc/node.c
sound/core/seq/Kconfig
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_queue.c
sound/core/seq/seq_queue.h
sound/pci/emu10k1/emufx.c
sound/pci/hda/patch_realtek.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/mixer_quirks.c
sound/usb/quirks.c
tools/testing/selftests/futex/Makefile
tools/testing/selftests/kmod/kmod.sh [changed mode: 0644->0755]
tools/testing/selftests/sysctl/sysctl.sh [changed mode: 0644->0755]
tools/testing/selftests/timers/freq-step.c

index a59916c29b3312cd4946a1d9a8da2331819e7845..1a85c1bdaf38a9ae7fb8b6555afc30abae661a20 100644 (file)
@@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf:
        Macbook Pro 17", iMac 20" :
                video=efifb:i20
 
+Accepted options:
+
+nowc   Don't map the framebuffer write combined. This can be used
+       to workaround side-effects and slowdowns on other CPU cores
+       when large amounts of console data are written.
+
 --
 Edgar Hucek <gimli@dark-green.com>
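
For reference, the nowc option documented above takes the same command line
form as the existing examples, e.g.:

	video=efifb:nowc

(usage form inferred from the surrounding video=efifb:<option> examples.)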
index 65ea5915178b4b9842474b7908d06660444aae34..074670b98bac784c27eecc18f672b2f1ad24f489 100644 (file)
@@ -58,20 +58,23 @@ Symbols/Function Pointers
        %ps     versatile_init
        %pB     prev_fn_of_versatile_init+0x88/0x88
 
-For printing symbols and function pointers. The ``S`` and ``s`` specifiers
-result in the symbol name with (``S``) or without (``s``) offsets. Where
-this is used on a kernel without KALLSYMS - the symbol address is
-printed instead.
+The ``F`` and ``f`` specifiers are for printing function pointers,
+for example, f->func, &gettimeofday. They have the same result as
+``S`` and ``s`` specifiers. But they do an extra conversion on
+ia64, ppc64 and parisc64 architectures where the function pointers
+are actually function descriptors.
+
+The ``S`` and ``s`` specifiers can be used for printing symbols
+from direct addresses, for example, __builtin_return_address(0),
+(void *)regs->ip. They result in the symbol name with (``S``) or
+without (``s``) offsets. If KALLSYMS are disabled then the symbol
+address is printed instead.
 
 The ``B`` specifier results in the symbol name with offsets and should be
 used when printing stack backtraces. The specifier takes into
 consideration the effect of compiler optimisations which may occur
 when tail-call``s are used and marked with the noreturn GCC attribute.
 
-On ia64, ppc64 and parisc64 architectures function pointers are
-actually function descriptors which must first be resolved. The ``F`` and
-``f`` specifiers perform this resolution and then provide the same
-functionality as the ``S`` and ``s`` specifiers.
 
 Kernel Pointers
 ===============
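
To make the reworked symbol-specifier text above concrete, an illustrative
usage sketch (not part of this patch; symbol names reuse the document's own
examples):

	/* Direct address: symbol name with (%pS) or without (%ps) offset */
	printk("%pS\n", (void *)regs->ip);  /* e.g. versatile_init+0x0/0x110 */
	printk("%ps\n", (void *)regs->ip);  /* e.g. versatile_init */

	/* Function pointer: same output as %pS/%ps, but resolves the
	 * function descriptor first on ia64, ppc64 and parisc64 */
	printk("%pF\n", &gettimeofday);

	/* Stack backtrace entry: offset calculation accounts for
	 * tail-call optimisation of noreturn functions */
	printk("%pB\n", (void *)__builtin_return_address(0));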
index 84d6a8277cbde11208b197549d12f505556b87cf..1c3feffb1c1cfd2b46685907300a9f026fb67e6a 100644 (file)
@@ -7110,7 +7110,6 @@ M:        Marc Zyngier <marc.zyngier@arm.com>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
-T:     git git://git.infradead.org/users/jcooper/linux.git irqchip/core
 F:     Documentation/devicetree/bindings/interrupt-controller/
 F:     drivers/irqchip/
 
@@ -14004,6 +14003,7 @@ F:      drivers/block/virtio_blk.c
 F:     include/linux/virtio*.h
 F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
+F:     mm/balloon_compaction.c
 
 VIRTIO CRYPTO DRIVER
 M:     Gonglei <arei.gonglei@huawei.com>
index 6eba23bcb5ad031d6365d938984dd3140a596018..90da7bdc3f4552d10bbd82c4fd0a88ed2f41db37 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index dfcc8e00cf1c53a753dc753c2c5598b220b480a1..0ade3619f3c3f1332b8d89c8d06ed4a67f29025b 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <1>;
                                status = "disabled";
+                               ranges;
 
                                adc: adc@50030800 {
                                        compatible = "fsl,imx25-gcq";
index aeaa5a6e4fcf462bc8a381ab20ba85c3579dea5c..a24e4f1911abe77917726d8ff8342f7dcb88dab2 100644 (file)
        pinctrl_pcie: pciegrp {
                fsl,pins = <
                        /* PCIe reset */
-                       MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x030b0
+                       MX6QDL_PAD_EIM_DA0__GPIO3_IO00  0x030b0
                        MX6QDL_PAD_EIM_DA4__GPIO3_IO04  0x030b0
                >;
        };
 &pcie {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_pcie>;
-       reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
+       reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
index 54c45402286b10dc06240aef9ec7d015a3fd5a58..0a24d1bf3c393463148919e685a879df39af58ef 100644 (file)
                        >;
                };
 
+               pinctrl_spi4: spi4grp {
+                       fsl,pins = <
+                               MX7D_PAD_GPIO1_IO09__GPIO1_IO9  0x59
+                               MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
+                               MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
+                       >;
+               };
+
                pinctrl_tsc2046_pendown: tsc2046_pendown {
                        fsl,pins = <
                                MX7D_PAD_EPDC_BDR1__GPIO2_IO29          0x59
                fsl,pins = <
                        MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT              0x110b0
                >;
-
-               pinctrl_spi4: spi4grp {
-                       fsl,pins = <
-                               MX7D_PAD_GPIO1_IO09__GPIO1_IO9  0x59
-                               MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
-                               MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
-                       >;
-               };
        };
 };
index cc06da3943668415e9b6379dbe38d2fde36518ec..60e69aeacbdbf4dff78923f227f96f8bc49c1fbc 100644 (file)
                        #size-cells = <1>;
                        atmel,smc = <&hsmc>;
                        reg = <0x10000000 0x10000000
-                              0x40000000 0x30000000>;
+                              0x60000000 0x30000000>;
                        ranges = <0x0 0x0 0x10000000 0x10000000
                                  0x1 0x0 0x60000000 0x10000000
                                  0x2 0x0 0x70000000 0x10000000
                        };
 
                        hsmc: hsmc@f8014000 {
-                               compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd";
+                               compatible = "atmel,sama5d2-smc", "syscon", "simple-mfd";
                                reg = <0xf8014000 0x1000>;
-                               interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
+                               interrupts = <17 IRQ_TYPE_LEVEL_HIGH 6>;
                                clocks = <&hsmc_clk>;
                                #address-cells = <1>;
                                #size-cells = <1>;
                                ranges;
 
-                               pmecc: ecc-engine@ffffc070 {
+                               pmecc: ecc-engine@f8014070 {
                                        compatible = "atmel,sama5d2-pmecc";
-                                       reg = <0xffffc070 0x490>,
-                                             <0xffffc500 0x100>;
+                                       reg = <0xf8014070 0x490>,
+                                             <0xf8014500 0x100>;
                                };
                        };
 
index 3f2eb76243e3c5f9d387959acae740ce871e5afa..d5562f9ce60079139d360e5d6afac59469051454 100644 (file)
@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                       unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->range_start = start;
+               tlb->range_end = end;
+       }
+
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
index 0d1f026d831aac7b7ecda69ac5ec0f57e693ec2b..ba2fde2909f949531bf4e63e25fe22856b0cec3b 100644 (file)
@@ -51,6 +51,7 @@
        compatible = "sinovoip,bananapi-m64", "allwinner,sun50i-a64";
 
        aliases {
+               ethernet0 = &emac;
                serial0 = &uart0;
                serial1 = &uart1;
        };
index 08cda24ea194cbffd08fc8de50dc7a4d525e5019..827168bc22ed2c6f9b831e4b3b92d75980becf1d 100644 (file)
@@ -51,6 +51,7 @@
        compatible = "pine64,pine64", "allwinner,sun50i-a64";
 
        aliases {
+               ethernet0 = &emac;
                serial0 = &uart0;
                serial1 = &uart1;
                serial2 = &uart2;
index 17eb1cc5bf6b4061a8ea8cf40e54b4eb52c1ebe3..216e3a5dafaef892fc764a076a0e0e5bdfaf396e 100644 (file)
@@ -53,6 +53,7 @@
                     "allwinner,sun50i-a64";
 
        aliases {
+               ethernet0 = &emac;
                serial0 = &uart0;
        };
 
index 732e2e06f503c83e42c0c4925b06295e6e47dc64..d9a720bff05d39ae93941140a3a8298f0bfa6570 100644 (file)
 };
 
 &pio {
+       interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                    <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
        compatible = "allwinner,sun50i-h5-pinctrl";
 };
index a451996f590a5173a3e9dbe56831a623e3c0639a..f903957da504a9e02fa910243b60388dc85ec959 100644 (file)
@@ -45,7 +45,7 @@
                stdout-path = "serial0:115200n8";
        };
 
-       audio_clkout: audio_clkout {
+       audio_clkout: audio-clkout {
                /*
                 * This is same as <&rcar_sound 0>
                 * but needed to avoid cs2000/rcar_sound probe dead-lock
index 74d08e44a651b9e58de7d8a6075f6101083f5262..a652ce0a5cb2c33178ac2e447fad1159efb0c6a9 100644 (file)
@@ -65,13 +65,13 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
        u64 _val;                                                       \
        if (needs_unstable_timer_counter_workaround()) {                \
                const struct arch_timer_erratum_workaround *wa;         \
-               preempt_disable();                                      \
+               preempt_disable_notrace();                              \
                wa = __this_cpu_read(timer_unstable_counter_workaround); \
                if (wa && wa->read_##reg)                               \
                        _val = wa->read_##reg();                        \
                else                                                    \
                        _val = read_sysreg(reg);                        \
-               preempt_enable();                                       \
+               preempt_enable_notrace();                               \
        } else {                                                        \
                _val = read_sysreg(reg);                                \
        }                                                               \
index acae781f7359ece14f713e74e168deb6e47dd68b..3288c2b3673149b728ea52ee87bc65ba723c9811 100644 (file)
 
 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
  * space open for things that want to use the area for 32-bit pointers.
  */
-#define ELF_ET_DYN_BASE                0x100000000UL
+#define ELF_ET_DYN_BASE                (2 * TASK_SIZE_64 / 3)
 
 #ifndef __ASSEMBLY__
 
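A quick sanity check of the new expression: assuming the common VA_BITS=48
configuration (TASK_SIZE_64 = 1UL << 48; the value is config dependent),
2 * TASK_SIZE_64 / 3 evaluates to 0xAAAAAAAAAAAA, roughly 170 TiB. That is
comfortably above 4GB, so the whole 32-bit range stays open for 32-bit
pointers, as the updated comment intends.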
index fced197b96264e01b20743e90706ed20cf30b242..cbe5ac3699bf0f9dbdfd726c112f6fc6bd1271f0 100644 (file)
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
  * collected.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                       unsigned long start, unsigned long end, bool force)
 {
+       if (force)
+               tlb->need_flush = 1;
        /*
         * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
         * tlb->end_addr.
index 8dd20358464f8efabcad2185d2b6f56c4fe805fa..48d91d5be4e9b6cf56c1cd0849830e2708967aa2 100644 (file)
@@ -2260,7 +2260,7 @@ config CPU_R4K_CACHE_TLB
 
 config MIPS_MT_SMP
        bool "MIPS MT SMP support (1 TC on each available VPE)"
-       depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
+       depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select SYNC_R4K
index 04343625b9292807d886951eaed6866426dfc42d..bc2708c9ada40cf4206cd228d84a6ebf634161b3 100644 (file)
@@ -243,8 +243,21 @@ include arch/mips/Kbuild.platforms
 ifdef CONFIG_PHYSICAL_START
 load-y                                 = $(CONFIG_PHYSICAL_START)
 endif
-entry-y                                = 0x$(shell $(NM) vmlinux 2>/dev/null \
+
+entry-noisa-y                          = 0x$(shell $(NM) vmlinux 2>/dev/null \
                                        | grep "\bkernel_entry\b" | cut -f1 -d \ )
+ifdef CONFIG_CPU_MICROMIPS
+  #
+  # Set the ISA bit, since the kernel_entry symbol in the ELF will have it
+  # clear which would lead to images containing addresses which bootloaders may
+  # jump to as MIPS32 code.
+  #
+  entry-y = $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \
+              $(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \
+              $(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y)))))))))
+else
+  entry-y = $(entry-noisa-y)
+endif
 
 cflags-y                       += -I$(srctree)/arch/mips/include/asm/mach-generic
 drivers-$(CONFIG_PCI)          += arch/mips/pci/
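
In case the nested $(patsubst) chain above looks opaque: each pattern rewrites
a trailing even hex digit to the next odd one (0 to 1, 2 to 3, ..., e to f),
and since an address ends in exactly one digit, at most one substitution
fires. The net effect is simply to set bit 0 of the entry address, which is
the microMIPS ISA bit. For example, a hypothetical entry-noisa-y of
0x80100000 would yield entry-y 0x80100001.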
diff --git a/arch/mips/boot/compressed/.gitignore b/arch/mips/boot/compressed/.gitignore
new file mode 100644 (file)
index 0000000..ebae133
--- /dev/null
@@ -0,0 +1,2 @@
+ashldi3.c
+bswapsi.c
index 542be1cd0f32c6cbe14cb0c3936ffe9cd11bd594..bfdfaf32d2c49742066329e800a6b535f363c3e1 100644 (file)
@@ -13,9 +13,9 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/of_platform.h>
+#include <linux/io.h>
 
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-gpio-defs.h>
 
 /* USB Control Register */
 union cvm_usbdrd_uctl_ctl {
index 1910223a9c02ba1474929905b235e43a07501aaa..cea2bb1621e68b211d5dc99fdad78f208b1ad8a3 100644 (file)
                 * Find irq with highest priority
                 */
                # open coded PTR_LA t1, cpu_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
                # open coded la t1, cpu_mask_nr_tbl
                lui     t1, %hi(cpu_mask_nr_tbl)
                addiu   t1, %lo(cpu_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
-               # open coded dla t1, cpu_mask_nr_tbl
-               .set    push
-               .set    noat
-               lui     t1, %highest(cpu_mask_nr_tbl)
-               lui     AT, %hi(cpu_mask_nr_tbl)
-               daddiu  t1, t1, %higher(cpu_mask_nr_tbl)
-               daddiu  AT, AT, %lo(cpu_mask_nr_tbl)
-               dsll    t1, 32
-               daddu   t1, t1, AT
-               .set    pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 1:             lw      t2,(t1)
                nop
                 * Find irq with highest priority
                 */
                # open coded PTR_LA t1,asic_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
                # open coded la t1, asic_mask_nr_tbl
                lui     t1, %hi(asic_mask_nr_tbl)
                addiu   t1, %lo(asic_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
-               # open coded dla t1, asic_mask_nr_tbl
-               .set    push
-               .set    noat
-               lui     t1, %highest(asic_mask_nr_tbl)
-               lui     AT, %hi(asic_mask_nr_tbl)
-               daddiu  t1, t1, %higher(asic_mask_nr_tbl)
-               daddiu  AT, AT, %lo(asic_mask_nr_tbl)
-               dsll    t1, 32
-               daddu   t1, t1, AT
-               .set    pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 2:             lw      t2,(t1)
                nop
index fc67947ed6586c2e416b2f6cc588a4445b3761ab..8b14c2706aa52ca83e07ff4dcb37c71850d133bc 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef _ASM_CACHE_H
 #define _ASM_CACHE_H
 
+#include <kmalloc.h>
+
 #define L1_CACHE_SHIFT         CONFIG_MIPS_L1_CACHE_SHIFT
 #define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
 
index 8baa9033b181d2ca2e3f6e469315bdb8bf5345b5..721b698bfe3cf7e0274bb9f0bb4bf58bfecef29e 100644 (file)
 #ifndef cpu_scache_line_size
 #define cpu_scache_line_size() cpu_data[0].scache.linesz
 #endif
+#ifndef cpu_tcache_line_size
+#define cpu_tcache_line_size() cpu_data[0].tcache.linesz
+#endif
 
 #ifndef cpu_hwrena_impl_bits
 #define cpu_hwrena_impl_bits           0
index d045973ddb336abbe9b54b21a65b4fb1a6c4f5fe..3ea84acf1814cb2c874ac17dafc73c64db50b5dc 100644 (file)
 #define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
 #define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
 #define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
+#define CVMX_L2C_ERR_TDTX(block_id)                                           \
+       (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_ERR_TTGX(block_id)                                           \
+       (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
 #define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
 #define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
 #define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
                ((offset) & 1) * 8)
 #define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull)    + \
                ((offset) & 31) * 8)
-#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
 
 
+union cvmx_l2c_err_tdtx {
+       uint64_t u64;
+       struct cvmx_l2c_err_tdtx_s {
+               __BITFIELD_FIELD(uint64_t dbe:1,
+               __BITFIELD_FIELD(uint64_t sbe:1,
+               __BITFIELD_FIELD(uint64_t vdbe:1,
+               __BITFIELD_FIELD(uint64_t vsbe:1,
+               __BITFIELD_FIELD(uint64_t syn:10,
+               __BITFIELD_FIELD(uint64_t reserved_22_49:28,
+               __BITFIELD_FIELD(uint64_t wayidx:18,
+               __BITFIELD_FIELD(uint64_t reserved_2_3:2,
+               __BITFIELD_FIELD(uint64_t type:2,
+               ;)))))))))
+       } s;
+};
+
+union cvmx_l2c_err_ttgx {
+       uint64_t u64;
+       struct cvmx_l2c_err_ttgx_s {
+               __BITFIELD_FIELD(uint64_t dbe:1,
+               __BITFIELD_FIELD(uint64_t sbe:1,
+               __BITFIELD_FIELD(uint64_t noway:1,
+               __BITFIELD_FIELD(uint64_t reserved_56_60:5,
+               __BITFIELD_FIELD(uint64_t syn:6,
+               __BITFIELD_FIELD(uint64_t reserved_22_49:28,
+               __BITFIELD_FIELD(uint64_t wayidx:15,
+               __BITFIELD_FIELD(uint64_t reserved_2_6:5,
+               __BITFIELD_FIELD(uint64_t type:2,
+               ;)))))))))
+       } s;
+};
+
 union cvmx_l2c_cfg {
        uint64_t u64;
        struct cvmx_l2c_cfg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
new file mode 100644 (file)
index 0000000..a951ad5
--- /dev/null
@@ -0,0 +1,60 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#define CVMX_L2D_ERR   (CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#define CVMX_L2D_FUS3  (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+
+
+union cvmx_l2d_err {
+       uint64_t u64;
+       struct cvmx_l2d_err_s {
+               __BITFIELD_FIELD(uint64_t reserved_6_63:58,
+               __BITFIELD_FIELD(uint64_t bmhclsel:1,
+               __BITFIELD_FIELD(uint64_t ded_err:1,
+               __BITFIELD_FIELD(uint64_t sec_err:1,
+               __BITFIELD_FIELD(uint64_t ded_intena:1,
+               __BITFIELD_FIELD(uint64_t sec_intena:1,
+               __BITFIELD_FIELD(uint64_t ecc_ena:1,
+               ;)))))))
+       } s;
+};
+
+union cvmx_l2d_fus3 {
+       uint64_t u64;
+       struct cvmx_l2d_fus3_s {
+               __BITFIELD_FIELD(uint64_t reserved_40_63:24,
+               __BITFIELD_FIELD(uint64_t ema_ctl:3,
+               __BITFIELD_FIELD(uint64_t reserved_34_36:3,
+               __BITFIELD_FIELD(uint64_t q3fus:34,
+               ;))))
+       } s;
+};
+
+#endif
index 9742202f2a326c45c504de6d77b51eb523421c0c..e638735cc3ac56c84fc861f293e6f2217b7ba3cb 100644 (file)
@@ -62,6 +62,7 @@ enum cvmx_mips_space {
 #include <asm/octeon/cvmx-iob-defs.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-l2c-defs.h>
+#include <asm/octeon/cvmx-l2d-defs.h>
 #include <asm/octeon/cvmx-l2t-defs.h>
 #include <asm/octeon/cvmx-led-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
index 770d4d1516cbb1ff34cca30cdccbe6c1b0aeebdc..6bace7695788fbc3b7663aeb353a08a3c45503af 100644 (file)
@@ -376,9 +376,6 @@ asmlinkage void start_secondary(void)
        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);
 
-       complete(&cpu_running);
-       synchronise_count_slave(cpu);
-
        set_cpu_online(cpu, true);
 
        set_cpu_sibling_map(cpu);
@@ -386,6 +383,9 @@ asmlinkage void start_secondary(void)
 
        calculate_cpu_foreign_map();
 
+       complete(&cpu_running);
+       synchronise_count_slave(cpu);
+
        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
         * is dangerous.
index 3f74f6c1f065fb6b5d2b930dc1d164e1624e63f0..9fea6c6bbf49e3768e81ad3ffb8b8af2bbdef61f 100644 (file)
@@ -48,7 +48,7 @@
 
 #include "uasm.c"
 
-static const struct insn const insn_table[insn_invalid] = {
+static const struct insn insn_table[insn_invalid] = {
        [insn_addiu]    = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
        [insn_addu]     = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD},
        [insn_and]      = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
index bd67ac74fe2d3420509a03647a20856341bb70c8..9632436d74d7a74b3d584ab6e87a1fc7e55827cc 100644 (file)
@@ -28,16 +28,15 @@ EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
 
 static int __init pcibios_set_cache_line_size(void)
 {
-       struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int lsize;
 
        /*
         * Set PCI cacheline size to that of the highest level in the
         * cache hierarchy.
         */
-       lsize = c->dcache.linesz;
-       lsize = c->scache.linesz ? : lsize;
-       lsize = c->tcache.linesz ? : lsize;
+       lsize = cpu_dcache_line_size();
+       lsize = cpu_scache_line_size() ? : lsize;
+       lsize = cpu_tcache_line_size() ? : lsize;
 
        BUG_ON(!lsize);
 
index 974276e828b2cdd3eecb07493f480a8779da2d41..e2690d7ca4ddd992e69fffe802cac6355ad14de8 100644 (file)
@@ -35,7 +35,8 @@ static __always_inline long gettimeofday_fallback(struct timeval *_tv,
        "       syscall\n"
        : "=r" (ret), "=r" (error)
        : "r" (tv), "r" (tz), "r" (nr)
-       : "memory");
+       : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
 
        return error ? -ret : ret;
 }
@@ -55,7 +56,8 @@ static __always_inline long clock_gettime_fallback(clockid_t _clkid,
        "       syscall\n"
        : "=r" (ret), "=r" (error)
        : "r" (clkid), "r" (ts), "r" (nr)
-       : "memory");
+       : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
 
        return error ? -ret : ret;
 }
index 36f858c37ca70b576e851a52ada48e2400de86a1..81b0031f909f6d65855ed3051505a4baa9469a64 100644 (file)
@@ -199,7 +199,7 @@ config PPC
        select HAVE_OPTPROBES                   if PPC64
        select HAVE_PERF_EVENTS
        select HAVE_PERF_EVENTS_NMI             if PPC64
-       select HAVE_HARDLOCKUP_DETECTOR_PERF    if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+       select HAVE_HARDLOCKUP_DETECTOR_PERF    if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_RCU_TABLE_FREE              if SMP
index 0695ce047d565199e4501333fa41ece48cdf9e45..34fc9bbfca9e68d6372e1d34b79ebf95d978e685 100644 (file)
@@ -293,7 +293,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
index 5175028c56ce74e3e50a2b30eabccf7b87ed8f0e..c5246d29f3859965316bd4d48e4e816283439bf0 100644 (file)
@@ -324,7 +324,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
index 1a61aa20dfbac9d5072ae83ef90640b8be380bd3..fd5d98a0b95c7b1ae5fda56892c2ecd43ea29f3a 100644 (file)
@@ -291,7 +291,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
index 49d8422767b4de686ec0ee64fbf69ac415f05003..e925c1c99c71cab982967e7f7df6325e3135506f 100644 (file)
@@ -223,17 +223,27 @@ system_call_exit:
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    .Lsyscall_exit_work
 
-       /* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
-       li      r7,MSR_FP
+       andi.   r0,r8,MSR_FP
+       beq 2f
 #ifdef CONFIG_ALTIVEC
-       oris    r7,r7,MSR_VEC@h
+       andis.  r0,r8,MSR_VEC@h
+       bne     3f
 #endif
-       and     r0,r8,r7
-       cmpd    r0,r7
-       bne     .Lsyscall_restore_math
-.Lsyscall_restore_math_cont:
+2:     addi    r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+       li      r10,MSR_RI
+       mtmsrd  r10,1           /* Restore RI */
+#endif
+       bl      restore_math
+#ifdef CONFIG_PPC_BOOK3S
+       li      r11,0
+       mtmsrd  r11,1
+#endif
+       ld      r8,_MSR(r1)
+       ld      r3,RESULT(r1)
+       li      r11,-MAX_ERRNO
 
-       cmpld   r3,r11
+3:     cmpld   r3,r11
        ld      r5,_CCR(r1)
        bge-    .Lsyscall_error
 .Lsyscall_error_cont:
@@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        std     r5,_CCR(r1)
        b       .Lsyscall_error_cont
 
-.Lsyscall_restore_math:
-       /*
-        * Some initial tests from restore_math to avoid the heavyweight
-        * C code entry and MSR manipulations.
-        */
-       LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
-       and.    r0,r0,r8
-       bne     1f
-
-       ld      r7,PACACURRENT(r13)
-       lbz     r0,THREAD+THREAD_LOAD_FP(r7)
-#ifdef CONFIG_ALTIVEC
-       lbz     r6,THREAD+THREAD_LOAD_VEC(r7)
-       add     r0,r0,r6
-#endif
-       cmpdi   r0,0
-       beq     .Lsyscall_restore_math_cont
-
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
-       li      r10,MSR_RI
-       mtmsrd  r10,1           /* Restore RI */
-#endif
-       bl      restore_math
-#ifdef CONFIG_PPC_BOOK3S
-       li      r11,0
-       mtmsrd  r11,1
-#endif
-       /* Restore volatiles, reload MSR from updated one */
-       ld      r8,_MSR(r1)
-       ld      r3,RESULT(r1)
-       li      r11,-MAX_ERRNO
-       b       .Lsyscall_restore_math_cont
-
 /* Traced system call support */
 .Lsyscall_dotrace:
        bl      save_nvgprs
index 9f3e2c932dccc1c3a1158fc174a8cf57e63dd75d..1f0fd361e09b9415f81242d67d9b939f36fd63ba 100644 (file)
@@ -362,7 +362,8 @@ void enable_kernel_vsx(void)
 
        cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 
-       if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+       if (current->thread.regs &&
+           (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
                check_if_tm_restore_required(current);
                /*
                 * If a thread has already been reclaimed then the
@@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 {
        if (tsk->thread.regs) {
                preempt_disable();
-               if (tsk->thread.regs->msr & MSR_VSX) {
+               if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
                        BUG_ON(tsk != current);
                        giveup_vsx(tsk);
                }
@@ -511,10 +512,6 @@ void restore_math(struct pt_regs *regs)
 {
        unsigned long msr;
 
-       /*
-        * Syscall exit makes a similar initial check before branching
-        * to restore_math. Keep them in synch.
-        */
        if (!msr_tm_active(regs->msr) &&
                !current->thread.load_fp && !loadvec(current->thread))
                return;
index cf0e1245b8cc1c78948a4004be2d20c5b5ac0b78..8d3320562c70f3ef7308645fb7b805fc14794e42 100644 (file)
@@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
        hard_irq_disable();
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
                raw_local_irq_restore(*flags);
-               cpu_relax();
+               spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
                raw_local_irq_save(*flags);
                hard_irq_disable();
        }
@@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
 static void nmi_ipi_lock(void)
 {
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-               cpu_relax();
+               spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 }
 
 static void nmi_ipi_unlock(void)
@@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
        nmi_ipi_lock_start(&flags);
        while (nmi_ipi_busy_count) {
                nmi_ipi_unlock_end(&flags);
-               cpu_relax();
+               spin_until_cond(nmi_ipi_busy_count == 0);
                nmi_ipi_lock_start(&flags);
        }
 
index b67f8b03a32d0f12ce29eeb4ac3be0a97384fe72..34721a257a770c450baac0288f8006c94ff1975b 100644 (file)
@@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags)
         * This may be called from low level interrupt handlers at some
         * point in future.
         */
-       local_irq_save(*flags);
-       while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
-               cpu_relax();
+       raw_local_irq_save(*flags);
+       hard_irq_disable(); /* Make it soft-NMI safe */
+       while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
+               raw_local_irq_restore(*flags);
+               spin_until_cond(!test_bit(0, &__wd_smp_lock));
+               raw_local_irq_save(*flags);
+               hard_irq_disable();
+       }
 }
 
 static inline void wd_smp_unlock(unsigned long *flags)
 {
        clear_bit_unlock(0, &__wd_smp_lock);
-       local_irq_restore(*flags);
+       raw_local_irq_restore(*flags);
 }
 
 static void wd_lockup_ipi(struct pt_regs *regs)
@@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs)
                nmi_panic(regs, "Hard LOCKUP");
 }
 
-static void set_cpu_stuck(int cpu, u64 tb)
+static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
 {
-       cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
-       cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+       cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
+       cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
        if (cpumask_empty(&wd_smp_cpus_pending)) {
                wd_smp_last_reset_tb = tb;
                cpumask_andnot(&wd_smp_cpus_pending,
@@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb)
                                &wd_smp_cpus_stuck);
        }
 }
+static void set_cpu_stuck(int cpu, u64 tb)
+{
+       set_cpumask_stuck(cpumask_of(cpu), tb);
+}
 
 static void watchdog_smp_panic(int cpu, u64 tb)
 {
@@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb)
        }
        smp_flush_nmi_ipi(1000000);
 
-       /* Take the stuck CPU out of the watch group */
-       for_each_cpu(c, &wd_smp_cpus_pending)
-               set_cpu_stuck(c, tb);
+       /* Take the stuck CPUs out of the watch group */
+       set_cpumask_stuck(&wd_smp_cpus_pending, tb);
 
-out:
        wd_smp_unlock(&flags);
 
        printk_safe_flush();
@@ -152,6 +159,11 @@ out:
 
        if (hardlockup_panic)
                nmi_panic(NULL, "Hard LOCKUP");
+
+       return;
+
+out:
+       wd_smp_unlock(&flags);
 }
 
 static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
@@ -258,9 +270,11 @@ static void wd_timer_fn(unsigned long data)
 
 void arch_touch_nmi_watchdog(void)
 {
+       unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
        int cpu = smp_processor_id();
 
-       watchdog_timer_interrupt(cpu);
+       if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
+               watchdog_timer_interrupt(cpu);
 }
 EXPORT_SYMBOL(arch_touch_nmi_watchdog);
 
@@ -283,6 +297,8 @@ static void stop_watchdog_timer_on(unsigned int cpu)
 
 static int start_wd_on_cpu(unsigned int cpu)
 {
+       unsigned long flags;
+
        if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
                WARN_ON(1);
                return 0;
@@ -297,12 +313,14 @@ static int start_wd_on_cpu(unsigned int cpu)
        if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
                return 0;
 
+       wd_smp_lock(&flags);
        cpumask_set_cpu(cpu, &wd_cpus_enabled);
        if (cpumask_weight(&wd_cpus_enabled) == 1) {
                cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
                wd_smp_last_reset_tb = get_tb();
        }
-       smp_wmb();
+       wd_smp_unlock(&flags);
+
        start_watchdog_timer_on(cpu);
 
        return 0;
@@ -310,12 +328,17 @@ static int start_wd_on_cpu(unsigned int cpu)
 
 static int stop_wd_on_cpu(unsigned int cpu)
 {
+       unsigned long flags;
+
        if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
                return 0; /* Can happen in CPU unplug case */
 
        stop_watchdog_timer_on(cpu);
 
+       wd_smp_lock(&flags);
        cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+       wd_smp_unlock(&flags);
+
        wd_smp_clear_cpu_pending(cpu, get_tb());
 
        return 0;
index 2abee070373fb3a8b757b8d3cb269e5d0b89dff6..a553aeea7af683812ba2f5a80d65e97cda163919 100644 (file)
@@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
  */
 static u64 pnv_deepest_stop_psscr_val;
 static u64 pnv_deepest_stop_psscr_mask;
+static u64 pnv_deepest_stop_flag;
 static bool deepest_stop_found;
 
 static int pnv_save_sprs_for_deep_states(void)
@@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void)
 
        update_subcore_sibling_mask();
 
-       if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
-               pnv_save_sprs_for_deep_states();
+       if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
+               int rc = pnv_save_sprs_for_deep_states();
+
+               if (likely(!rc))
+                       return;
+
+               /*
+                * The stop-api is unable to restore hypervisor
+                * resources on wakeup from platform idle states which
+                * lose full context. So disable such states.
+                */
+               supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
+               pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
+               pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
+
+               if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+                   (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
+                       /*
+                        * Use the default stop state for CPU-Hotplug
+                        * if available.
+                        */
+                       if (default_stop_found) {
+                               pnv_deepest_stop_psscr_val =
+                                       pnv_default_stop_val;
+                               pnv_deepest_stop_psscr_mask =
+                                       pnv_default_stop_mask;
+                               pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
+                                       pnv_deepest_stop_psscr_val);
+                       } else { /* Fallback to snooze loop for CPU-Hotplug */
+                               deepest_stop_found = false;
+                               pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
+                       }
+               }
+       }
 }
 
 u32 pnv_get_supported_cpuidle_states(void)
@@ -375,7 +408,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
                                                pnv_deepest_stop_psscr_val;
                srr1 = power9_idle_stop(psscr);
 
-       } else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+       } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
+                  (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
                srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
        } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
                   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -553,6 +587,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
                        max_residency_ns = residency_ns[i];
                        pnv_deepest_stop_psscr_val = psscr_val[i];
                        pnv_deepest_stop_psscr_mask = psscr_mask[i];
+                       pnv_deepest_stop_flag = flags[i];
                        deepest_stop_found = true;
                }
 
index 7317b3108a88859a91523c45f1e52c08cb22fdc4..2eb8ff0d6fca443543c32ac80ff690b4b67be1ef 100644 (file)
@@ -47,10 +47,9 @@ struct mmu_table_batch {
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
-static inline void tlb_gather_mmu(struct mmu_gather *tlb,
-                                 struct mm_struct *mm,
-                                 unsigned long start,
-                                 unsigned long end)
+static inline void
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-static inline void tlb_finish_mmu(struct mmu_gather *tlb,
-                                 unsigned long start, unsigned long end)
+static inline void
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->start = start;
+               tlb->end = end;
+       }
+
        tlb_flush_mmu(tlb);
 }
 
index 46e0d635e36f711aff9a88c45955905d7fbf3cc2..51a8bc967e75f1e3c96a70783e9da439310edbcb 100644 (file)
@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
-       if (tlb->fullmm)
+       if (tlb->fullmm || force)
                flush_tlb_mm(tlb->mm);
 
        /* keep the page table cache within bounds */
index 1d8321c827a8821bb4e9f4989eb883cd761370db..1b1286d0506910c0f9a92ab6af14e272dd008d61 100644 (file)
 #define SUN4V_CHIP_NIAGARA5    0x05
 #define SUN4V_CHIP_SPARC_M6    0x06
 #define SUN4V_CHIP_SPARC_M7    0x07
+#define SUN4V_CHIP_SPARC_M8    0x08
 #define SUN4V_CHIP_SPARC64X    0x8a
 #define SUN4V_CHIP_SPARC_SN    0x8b
 #define SUN4V_CHIP_UNKNOWN     0xff
 
+/*
+ * The following CPU_ID_xxx constants are used
+ * to identify the CPU type in the setup phase
+ * (see head_64.S)
+ */
+#define CPU_ID_NIAGARA1                ('1')
+#define CPU_ID_NIAGARA2                ('2')
+#define CPU_ID_NIAGARA3                ('3')
+#define CPU_ID_NIAGARA4                ('4')
+#define CPU_ID_NIAGARA5                ('5')
+#define CPU_ID_M6              ('6')
+#define CPU_ID_M7              ('7')
+#define CPU_ID_M8              ('8')
+#define CPU_ID_SONOMA1         ('N')
+
 #ifndef __ASSEMBLY__
 
 enum ultra_tlb_layout {
index 493e023a468a919c61d77451e43e0a4a2e414bbe..ef4f18f7a67402ed8baceb2ea05ee7f6368cc404 100644 (file)
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
                sparc_pmu_type = "sparc-m7";
                break;
 
+       case SUN4V_CHIP_SPARC_M8:
+               sparc_cpu_type = "SPARC-M8";
+               sparc_fpu_type = "SPARC-M8 integrated FPU";
+               sparc_pmu_type = "sparc-m8";
+               break;
+
        case SUN4V_CHIP_SPARC_SN:
                sparc_cpu_type = "SPARC-SN";
                sparc_fpu_type = "SPARC-SN integrated FPU";
index 45c820e1cba5d949ff936f15392ca3c0c8578a34..90d550bbfeefe484f1560940f111235f26332d7a 100644 (file)
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
        case SUN4V_CHIP_NIAGARA5:
        case SUN4V_CHIP_SPARC_M6:
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
        case SUN4V_CHIP_SPARC64X:
                rover_inc_table = niagara_iterate_method;
index 41a4073286671eff51f275bfca4ae6d9d01db74d..78e0211753d28f14f955af865704248b1e5daf24 100644 (file)
@@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type)
         nop
 
 70:    ldub    [%g1 + 7], %g2
-       cmp     %g2, '3'
+       cmp     %g2, CPU_ID_NIAGARA3
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA3, %g4
-       cmp     %g2, '4'
+       cmp     %g2, CPU_ID_NIAGARA4
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA4, %g4
-       cmp     %g2, '5'
+       cmp     %g2, CPU_ID_NIAGARA5
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA5, %g4
-       cmp     %g2, '6'
+       cmp     %g2, CPU_ID_M6
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M6, %g4
-       cmp     %g2, '7'
+       cmp     %g2, CPU_ID_M7
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M7, %g4
-       cmp     %g2, 'N'
+       cmp     %g2, CPU_ID_M8
+       be,pt   %xcc, 5f
+        mov    SUN4V_CHIP_SPARC_M8, %g4
+       cmp     %g2, CPU_ID_SONOMA1
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_SN, %g4
        ba,pt   %xcc, 49f
@@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type)
 91:    sethi   %hi(prom_cpu_compatible), %g1
        or      %g1, %lo(prom_cpu_compatible), %g1
        ldub    [%g1 + 17], %g2
-       cmp     %g2, '1'
+       cmp     %g2, CPU_ID_NIAGARA1
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA1, %g4
-       cmp     %g2, '2'
+       cmp     %g2, CPU_ID_NIAGARA2
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA2, %g4
        
@@ -600,6 +603,9 @@ niagara_tlb_fixup:
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_M7
+       be,pt   %xcc, niagara4_patch
+        nop
+       cmp     %g1, SUN4V_CHIP_SPARC_M8
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_SN
index 4d9c3e13c15056b5d60e7ccd266b36cfe29d2c00..150ee7d4b059a69e174dff7c7d16ff906f73e1ed 100644 (file)
@@ -288,10 +288,17 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
-       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
-           sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
+
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
+       case SUN4V_CHIP_SPARC_SN:
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);
+               break;
+       default:
+               break;
+       }
 
        sun4v_hvapi_init();
 }
@@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
@@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
@@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
@@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
index fed73f14aa49befee59b93b0fcab02f65f7e10d2..afa0099f374852e0cf093088d942512008a45a68 100644 (file)
@@ -1944,12 +1944,22 @@ static void __init setup_page_offset(void)
                        break;
                case SUN4V_CHIP_SPARC_M7:
                case SUN4V_CHIP_SPARC_SN:
-               default:
                        /* M7 and later support 52-bit virtual addresses.  */
                        sparc64_va_hole_top =    0xfff8000000000000UL;
                        sparc64_va_hole_bottom = 0x0008000000000000UL;
                        max_phys_bits = 49;
                        break;
+               case SUN4V_CHIP_SPARC_M8:
+               default:
+                       /* M8 and later support 54-bit virtual addresses.
+                        * However, restrict M8 and above to 53 VA bits,
+                        * since a 4-level page table cannot support more
+                        * than 53 VA bits.
+                        */
+                       sparc64_va_hole_top =    0xfff0000000000000UL;
+                       sparc64_va_hole_bottom = 0x0010000000000000UL;
+                       max_phys_bits = 51;
+                       break;
                }
        }
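
The hole constants follow directly from the VA width: with va_bits of usable virtual address, the unmapped hole spans 2^(va_bits-1) up to -2^(va_bits-1). A small sketch, with va_hole() as a hypothetical helper, reproduces the M7 and M8 values chosen above:

    #include <stdio.h>

    /* Hypothetical helper: derive the sparc64 VA hole from the VA width. */
    static void va_hole(unsigned int va_bits)
    {
            unsigned long bottom = 1UL << (va_bits - 1);
            unsigned long top    = ~(bottom - 1);

            printf("%2u VA bits: bottom=0x%016lx top=0x%016lx\n",
                   va_bits, bottom, top);
    }

    int main(void)
    {
            va_hole(52);    /* M7: 0x0008000000000000 / 0xfff8000000000000 */
            va_hole(53);    /* M8: 0x0010000000000000 / 0xfff0000000000000 */
            return 0;
    }
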
 
@@ -2161,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                pagecv_flag = 0x00;
                break;
@@ -2313,6 +2324,7 @@ void __init paging_init(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                page_cache4v_flag = _PAGE_CP_4V;
                break;
index 600a2e9bfee2feea2a6dbc8b91d2a5a872d9d8d3..344d95619d0334659e6f4a9f3a5bff70ae95f67c 100644 (file)
@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
        tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
+       if (force) {
+               tlb->start = start;
+               tlb->end = end;
+               tlb->need_flush = 1;
+       }
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
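
Both converted headers honor the same contract: a true force widens the gather to the caller's range and guarantees a flush even when nothing was batched. A minimal sketch of that contract follows, using a hypothetical struct rather than either header's real mmu_gather:

    #include <stdbool.h>
    #include <stdio.h>

    struct tlb_model {
            unsigned long start, end;
            bool fullmm, need_flush;
    };

    /* Hypothetical model of arch_tlb_finish_mmu(): "force" must flush
     * even when the gather batched no pages. */
    static void finish_mmu(struct tlb_model *tlb, unsigned long start,
                           unsigned long end, bool force)
    {
            if (force) {
                    tlb->start = start;
                    tlb->end = end;
                    tlb->need_flush = true;
            }
            if (tlb->fullmm || tlb->need_flush)
                    printf("flush [%lx, %lx)\n", tlb->start, tlb->end);
    }

    int main(void)
    {
            struct tlb_model tlb = { 0 };

            finish_mmu(&tlb, 0x1000, 0x2000, true);  /* forced: flushes */
            return 0;
    }
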
index 781521b7cf9ef6b6766dac9249a3568f257c4fa8..323cb065be5eda120b44dac79618a13301ece231 100644 (file)
@@ -100,6 +100,7 @@ config X86
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select GENERIC_TIME_VSYSCALL
+       select HARDLOCKUP_CHECK_TIMESTAMP       if X86_64
        select HAVE_ACPI_APEI                   if ACPI
        select HAVE_ACPI_APEI_NMI               if ACPI
        select HAVE_ALIGNED_STRUCT_PAGE         if SLUB
@@ -163,7 +164,7 @@ config X86
        select HAVE_PCSPKR_PLATFORM
        select HAVE_PERF_EVENTS
        select HAVE_PERF_EVENTS_NMI
-       select HAVE_HARDLOCKUP_DETECTOR_PERF    if HAVE_PERF_EVENTS_NMI
+       select HAVE_HARDLOCKUP_DETECTOR_PERF    if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
index 1cd792db15efe760e3a6fc8b17b9a4c4e6f35233..1eab79c9ac484172a63d9cc0c4a409b7fffe7e8b 100644 (file)
        .set T1, REG_T1
 .endm
 
-#define K_BASE         %r8
 #define HASH_PTR       %r9
+#define BLOCKS_CTR     %r8
 #define BUFFER_PTR     %r10
 #define BUFFER_PTR2    %r13
-#define BUFFER_END     %r11
 
 #define PRECALC_BUF    %r14
 #define WK_BUF         %r15
                 * blended AVX2 and ALU instruction scheduling
                 * 1 vector iteration per 8 rounds
                 */
-               vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+               vmovdqu (i * 2)(BUFFER_PTR), W_TMP
        .elseif ((i & 7) == 1)
-               vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+               vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
                         WY_TMP, WY_TMP
        .elseif ((i & 7) == 2)
                vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
        .elseif ((i & 7) == 4)
-               vpaddd  K_XMM(K_BASE), WY, WY_TMP
+               vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
        .elseif ((i & 7) == 7)
                vmovdqu  WY_TMP, PRECALC_WK(i&~7)
 
                vpxor   WY, WY_TMP, WY_TMP
        .elseif ((i & 7) == 7)
                vpxor   WY_TMP2, WY_TMP, WY
-               vpaddd  K_XMM(K_BASE), WY, WY_TMP
+               vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
                vmovdqu WY_TMP, PRECALC_WK(i&~7)
 
                PRECALC_ROTATE_WY
                vpsrld  $30, WY, WY
                vpor    WY, WY_TMP, WY
        .elseif ((i & 7) == 7)
-               vpaddd  K_XMM(K_BASE), WY, WY_TMP
+               vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
                vmovdqu WY_TMP, PRECALC_WK(i&~7)
 
                PRECALC_ROTATE_WY
 
 .endm
 
+/* Add constant \d to \a only if the counter \b >= \c (uses RTA as temp):
+ * \a += (\b >= \c) ? \d : 0
+ */
+.macro ADD_IF_GE a, b, c, d
+       mov     \a, RTA
+       add     $\d, RTA
+       cmp     $\c, \b
+       cmovge  RTA, \a
+.endm
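
In C terms the macro is a branchless conditional bump. A sketch, treating the operands as plain integers rather than registers:

    #include <stdio.h>

    /* C model of ADD_IF_GE: a += (b >= c) ? d : 0; the asm computes
     * this branchlessly with cmovge. */
    static unsigned long add_if_ge(unsigned long a, long b, long c, long d)
    {
            return (b >= c) ? a + d : a;
    }

    int main(void)
    {
            /* bump BUFFER_PTR2 past block 0 only if >= 2 blocks remain */
            printf("%lu\n", add_if_ge(0, 3, 2, 64));  /* prints 64 */
            return 0;
    }
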
+
 /*
  * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
  */
        lea     (2*4*80+32)(%rsp), WK_BUF
 
        # Precalc WK for first 2 blocks
-       PRECALC_OFFSET = 0
+       ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
        .set i, 0
        .rept    160
                PRECALC i
                .set i, i + 1
        .endr
-       PRECALC_OFFSET = 128
+
+       /* Go to next block if needed */
+       ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+       ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
        xchg    WK_BUF, PRECALC_BUF
 
        .align 32
@@ -479,8 +491,8 @@ _loop:
-        * we use K_BASE value as a signal of a last block,
-        * it is set below by: cmovae BUFFER_PTR, K_BASE
+        * we use the blocks counter as a signal of the last block,
+        * it is decremented below by: sub $1, BLOCKS_CTR
         */
-       cmp     K_BASE, BUFFER_PTR
-       jne     _begin
+       test BLOCKS_CTR, BLOCKS_CTR
+       jnz _begin
        .align 32
        jmp     _end
        .align 32
@@ -512,10 +524,10 @@ _loop0:
                .set j, j+2
        .endr
 
-       add     $(2*64), BUFFER_PTR       /* move to next odd-64-byte block */
-       cmp     BUFFER_END, BUFFER_PTR    /* is current block the last one? */
-       cmovae  K_BASE, BUFFER_PTR      /* signal the last iteration smartly */
-
+       /* Update Counter */
+       sub $1, BLOCKS_CTR
+       /* Move to the next block only if needed */
+       ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
        /*
         * rounds
         * 60,62,64,66,68
@@ -532,8 +544,8 @@ _loop0:
        UPDATE_HASH     12(HASH_PTR), D
        UPDATE_HASH     16(HASH_PTR), E
 
-       cmp     K_BASE, BUFFER_PTR      /* is current block the last one? */
-       je      _loop
+       test    BLOCKS_CTR, BLOCKS_CTR
+       jz      _loop
 
        mov     TB, B
 
@@ -575,10 +587,10 @@ _loop2:
                .set j, j+2
        .endr
 
-       add     $(2*64), BUFFER_PTR2      /* move to next even-64-byte block */
-
-       cmp     BUFFER_END, BUFFER_PTR2   /* is current block the last one */
-       cmovae  K_BASE, BUFFER_PTR       /* signal the last iteration smartly */
+       /* update counter */
+       sub     $1, BLOCKS_CTR
+       /* Move to the next block only if needed */
+       ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 
        jmp     _loop3
 _loop3:
@@ -641,19 +653,12 @@ _loop3:
 
        avx2_zeroupper
 
-       lea     K_XMM_AR(%rip), K_BASE
-
+       /* Setup initial values */
        mov     CTX, HASH_PTR
        mov     BUF, BUFFER_PTR
-       lea     64(BUF), BUFFER_PTR2
-
-       shl     $6, CNT                 /* mul by 64 */
-       add     BUF, CNT
-       add     $64, CNT
-       mov     CNT, BUFFER_END
 
-       cmp     BUFFER_END, BUFFER_PTR2
-       cmovae  K_BASE, BUFFER_PTR2
+       mov     BUF, BUFFER_PTR2
+       mov     CNT, BLOCKS_CTR
 
        xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
 
index f960a043cdeba4a36f4ba20fddf81369f8ced38f..fc61739150e7c2c5a484c020b14441551fe5403a 100644 (file)
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
 
 static bool avx2_usable(void)
 {
-       if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+       if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
                && boot_cpu_has(X86_FEATURE_BMI1)
                && boot_cpu_has(X86_FEATURE_BMI2))
                return true;
index 8e3db8f642a7a02dd8f99db3a25dedbfd1deb01a..af12e294caeda5f518cf8b80ce5765996f1d8c56 100644 (file)
@@ -2114,7 +2114,7 @@ static void refresh_pce(void *ignored)
        load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
 }
 
-static void x86_pmu_event_mapped(struct perf_event *event)
+static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 {
        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return;
@@ -2129,22 +2129,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
         * For now, this can't happen because all callers hold mmap_sem
         * for write.  If this changes, we'll need a different solution.
         */
-       lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+       lockdep_assert_held_exclusive(&mm->mmap_sem);
 
-       if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
-               on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+       if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
+               on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
 }
 
-static void x86_pmu_event_unmapped(struct perf_event *event)
+static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
 {
-       if (!current->mm)
-               return;
 
        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return;
 
-       if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
-               on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+       if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
+               on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
 }
 
 static int x86_pmu_event_idx(struct perf_event *event)
index 1c18d83d3f094d1f5abfa09a08b67e9cb6b7376d..9aeb91935ce02387d8dae5e2f51bf1750f420a43 100644 (file)
@@ -247,11 +247,11 @@ extern int force_personality32;
 
 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
  * space open for things that want to use the area for 32-bit pointers.
  */
 #define ELF_ET_DYN_BASE                (mmap_is_ia32() ? 0x000400000UL : \
-                                                 0x100000000UL)
+                                                 (TASK_SIZE / 3 * 2))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
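
As a sanity check on the new formula, assuming the usual x86-64 limit of TASK_SIZE = 2^47 - PAGE_SIZE (an assumption, since TASK_SIZE is not shown in this hunk):

    #include <stdio.h>

    int main(void)
    {
            unsigned long task_size = (1UL << 47) - 4096;  /* assumed */
            unsigned long base = task_size / 3 * 2;

            /* Two-thirds of the user VA: far above 4GB, while leaving
             * the top third free for mmap and the stack. */
            printf("ELF_ET_DYN_BASE ~ 0x%lx\n", base);
            return 0;
    }
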
index 21126155a739f4a3495499f052abcd3eab5a0bb7..0ead9dbb91301d0f7f8923dcf33f25515bd182b8 100644 (file)
@@ -43,6 +43,9 @@ struct hypervisor_x86 {
 
        /* pin current vcpu to specified physical cpu (run rarely) */
        void            (*pin_vcpu)(int);
+
+       /* called during init_mem_mapping() to setup early mappings. */
+       void            (*init_mem_mapping)(void);
 };
 
 extern const struct hypervisor_x86 *x86_hyper;
@@ -57,8 +60,15 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
 extern void init_hypervisor_platform(void);
 extern bool hypervisor_x2apic_available(void);
 extern void hypervisor_pin_vcpu(int cpu);
+
+static inline void hypervisor_init_mem_mapping(void)
+{
+       if (x86_hyper && x86_hyper->init_mem_mapping)
+               x86_hyper->init_mem_mapping();
+}
 #else
 static inline void init_hypervisor_platform(void) { }
 static inline bool hypervisor_x2apic_available(void) { return false; }
+static inline void hypervisor_init_mem_mapping(void) { }
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
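
The new hook follows the struct's existing optional-callback pattern: a guarded indirect call that collapses to a no-op on bare metal or when a hypervisor leaves the callback NULL. A standalone sketch with hypothetical names:

    #include <stdio.h>

    struct hyper_ops {
            const char *name;
            void (*init_mem_mapping)(void);         /* optional */
    };

    static void xen_hook(void) { puts("xen: fix up early mappings"); }

    static const struct hyper_ops xen_ops = { "xen", xen_hook };
    static const struct hyper_ops *active;          /* NULL on bare metal */

    static void hypervisor_init_mem_mapping_model(void)
    {
            if (active && active->init_mem_mapping)
                    active->init_mem_mapping();
    }

    int main(void)
    {
            hypervisor_init_mem_mapping_model();    /* no-op */
            active = &xen_ops;
            hypervisor_init_mem_mapping_model();    /* runs the hook */
            return 0;
    }
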
index 7cf7c70b6ef2a20483361fb8333a85ace25bf1d7..0ee83321a3136fcca7a00a3b7e6c375e7a51e13f 100644 (file)
@@ -40,13 +40,16 @@ static void aperfmperf_snapshot_khz(void *dummy)
        struct aperfmperf_sample *s = this_cpu_ptr(&samples);
        ktime_t now = ktime_get();
        s64 time_delta = ktime_ms_delta(now, s->time);
+       unsigned long flags;
 
        /* Don't bother re-computing within the cache threshold time. */
        if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
                return;
 
+       local_irq_save(flags);
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
+       local_irq_restore(flags);
 
        aperf_delta = aperf - s->aperf;
        mperf_delta = mperf - s->mperf;
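
The point of the irq bracket is that the two MSRs must be sampled back to back; an interrupt landing between the reads lets APERF and MPERF drift apart and skews the derived frequency. A sketch of the pairing, with the kernel primitives stubbed out as no-ops purely for illustration:

    #include <stdio.h>

    /* Stubs standing in for kernel primitives; illustration only, the
     * real ones disable and re-enable local interrupts. */
    #define local_irq_save(flags)    ((void)(flags))
    #define local_irq_restore(flags) ((void)(flags))

    static unsigned long long rdmsr_model(int which)
    {
            static unsigned long long fake[2] = { 1000, 2000 };
            return fake[which] += 100;      /* pretend the counter ticks */
    }

    int main(void)
    {
            unsigned long flags = 0;
            unsigned long long aperf, mperf;

            local_irq_save(flags);          /* no interrupt between... */
            aperf = rdmsr_model(0);
            mperf = rdmsr_model(1);         /* ...these two reads */
            local_irq_restore(flags);

            printf("aperf=%llu mperf=%llu\n", aperf, mperf);
            return 0;
    }
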
index 673541eb3b3f16c8c029349d597d67f4bb83a77a..bf3f1065d6addb88b898ba3a86089cccff6ed15e 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/dma.h>           /* for MAX_DMA_PFN */
 #include <asm/microcode.h>
 #include <asm/kaslr.h>
+#include <asm/hypervisor.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -636,6 +637,8 @@ void __init init_mem_mapping(void)
        load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 
+       hypervisor_init_mem_mapping();
+
        early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
 
index 87d791356ea9052f79f23e00f5d0024ff8cf0b7f..de503c225ae1f194b10c71b44528ad2a2a7a4c0d 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
 #include <asm/e820/api.h>
+#include <asm/early_ioremap.h>
 
 #include <asm/xen/cpuid.h>
 #include <asm/xen/hypervisor.h>
 #include "mmu.h"
 #include "smp.h"
 
-void __ref xen_hvm_init_shared_info(void)
+static unsigned long shared_info_pfn;
+
+void xen_hvm_init_shared_info(void)
 {
        struct xen_add_to_physmap xatp;
-       u64 pa;
-
-       if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
-               /*
-                * Search for a free page starting at 4kB physical address.
-                * Low memory is preferred to avoid an EPT large page split up
-                * by the mapping.
-                * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
-                * the BIOS used for HVM guests is well behaved and won't
-                * clobber memory other than the first 4kB.
-                */
-               for (pa = PAGE_SIZE;
-                    !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
-                    memblock_is_reserved(pa);
-                    pa += PAGE_SIZE)
-                       ;
-
-               memblock_reserve(pa, PAGE_SIZE);
-               HYPERVISOR_shared_info = __va(pa);
-       }
 
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
-       xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+       xatp.gpfn = shared_info_pfn;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();
 }
 
+static void __init reserve_shared_info(void)
+{
+       u64 pa;
+
+       /*
+        * Search for a free page starting at 4kB physical address.
+        * Low memory is preferred to avoid an EPT large page split up
+        * by the mapping.
+        * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
+        * the BIOS used for HVM guests is well behaved and won't
+        * clobber memory other than the first 4kB.
+        */
+       for (pa = PAGE_SIZE;
+            !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
+            memblock_is_reserved(pa);
+            pa += PAGE_SIZE)
+               ;
+
+       shared_info_pfn = PHYS_PFN(pa);
+
+       memblock_reserve(pa, PAGE_SIZE);
+       HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
+}
+
+static void __init xen_hvm_init_mem_mapping(void)
+{
+       early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
+       HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
+}
+
 static void __init init_hvm_pv_info(void)
 {
        int major, minor;
@@ -153,6 +166,7 @@ static void __init xen_hvm_guest_init(void)
 
        init_hvm_pv_info();
 
+       reserve_shared_info();
        xen_hvm_init_shared_info();
 
        /*
@@ -218,5 +232,6 @@ const struct hypervisor_x86 x86_hyper_xen_hvm = {
        .init_platform          = xen_hvm_guest_init,
        .pin_vcpu               = xen_pin_vcpu,
        .x2apic_available       = xen_x2apic_para_available,
+       .init_mem_mapping       = xen_hvm_init_mem_mapping,
 };
 EXPORT_SYMBOL(x86_hyper_xen_hvm);
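
The restructuring is a two-phase mapping handoff: before init_mem_mapping() only memblock_reserve() and early_memremap() are usable, so the shared-info page first sits behind a temporary early mapping and switches to a plain __va() pointer once the direct map exists. A compressed sketch, with hypothetical stand-ins for the two mapping mechanisms:

    #include <stdio.h>

    static char early_window[4096];   /* stands in for early_memremap() */
    static char direct_map[4096];     /* stands in for __va()           */
    static char *shared_info;

    static void reserve_shared_info_model(void)
    {
            shared_info = early_window;   /* phase 1: direct map not up */
    }

    static void init_mem_mapping_hook_model(void)
    {
            shared_info = direct_map;     /* phase 2: drop early mapping */
    }

    int main(void)
    {
            reserve_shared_info_model();
            printf("early mapping at %p\n", (void *)shared_info);
            init_mem_mapping_hook_model();
            printf("final mapping at %p\n", (void *)shared_info);
            return 0;
    }
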
index 83e92beb3c9feb25f3be917e2b675f90b1544b5f..9b1ea478577b033195f34b457c85ad16eb759c6a 100644 (file)
@@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
  */
 bool __bio_integrity_endio(struct bio *bio)
 {
-       if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) {
-               struct bio_integrity_payload *bip = bio_integrity(bio);
+       struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+       struct bio_integrity_payload *bip = bio_integrity(bio);
 
+       if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
+           (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
                INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
                queue_work(kintegrityd_wq, &bip->bip_work);
                return false;
index 0c3354cf3552877d7542cdb32f0ce4102a79fcce..76944e3271bf34a730e62fa0b27266abce86942e 100644 (file)
@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
        for (queue = 0; queue < set->nr_hw_queues; queue++) {
                mask = pci_irq_get_affinity(pdev, queue);
                if (!mask)
-                       return -EINVAL;
+                       goto fallback;
 
                for_each_cpu(cpu, mask)
                        set->mq_map[cpu] = queue;
        }
 
        return 0;
+
+fallback:
+       WARN_ON_ONCE(set->nr_hw_queues > 1);
+       for_each_possible_cpu(cpu)
+               set->mq_map[cpu] = 0;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
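
The fallback is always correct for a single hardware queue, since every CPU can legitimately map to queue 0; that is why the hard -EINVAL could be dropped, and the WARN fires only when a multi-queue device loses its affinity masks. A reduced sketch with hypothetical types:

    #include <stdio.h>

    #define NR_CPUS 8

    /* Hypothetical reduction of the map/fallback logic. */
    static void map_queues(int *mq_map, int nr_hw_queues, int have_affinity)
    {
            int cpu, queue;

            if (have_affinity) {
                    /* normal path: masks from pci_irq_get_affinity() */
                    for (queue = 0; queue < nr_hw_queues; queue++)
                            for (cpu = queue; cpu < NR_CPUS; cpu += nr_hw_queues)
                                    mq_map[cpu] = queue;
                    return;
            }

            /* fallback: valid for any device, ideal for single-queue */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    mq_map[cpu] = 0;
    }

    int main(void)
    {
            int map[NR_CPUS];

            map_queues(map, 2, 0);
            printf("cpu0 -> q%d\n", map[0]);
            return 0;
    }
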
index 211ef367345f270f300cb5cdcb9a63c0a50d2420..4603b115e234887860cbb28520c36ba9e5e7efd8 100644 (file)
@@ -360,12 +360,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                return ERR_PTR(ret);
 
        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+       blk_queue_exit(q);
 
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
 
        blk_mq_put_ctx(alloc_data.ctx);
-       blk_queue_exit(q);
 
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
@@ -411,12 +411,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+       blk_queue_exit(q);
 
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
 
-       blk_queue_exit(q);
-
        return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
@@ -684,8 +683,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
 {
-       kblockd_schedule_delayed_work(&q->requeue_work,
-                                     msecs_to_jiffies(msecs));
+       kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
+                                   msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
index 4ac3e06b41d846440d35079dfa54885111eec0df..98aa8c808a3346dc559e2f419ae4378dec075ffc 100644 (file)
 #include <linux/kernel.h>
 #include <linux/serial_core.h>
 
+/*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck at 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
 /*
  * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
  * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
                goto done;
        }
 
-       if (qdf2400_erratum_44_present(&table->header))
-               uart = "qdf2400_e44";
+       /*
+        * If the E44 erratum is required, then we need to tell the pl011
+        * driver to implement the work-around.
+        *
+        * The global variable is used by the probe function when it
+        * creates the UARTs, whether or not they're used as a console.
+        *
+        * If the user specifies "traditional" earlycon, the qdf2400_e44
+        * console name matches the EARLYCON_DECLARE() statement, and
+        * SPCR is not used.  Parameter "earlycon" is false.
+        *
+        * If the user specifies "SPCR" earlycon, then we need to update
+        * the console name so that it also says "qdf2400_e44".  Parameter
+        * "earlycon" is true.
+        *
+        * For consistency, if we change the console name, then we do it
+        * for everyone, not just earlycon.
+        */
+       if (qdf2400_erratum_44_present(&table->header)) {
+               qdf2400_e44_present = true;
+               if (earlycon)
+                       uart = "qdf2400_e44";
+       }
+
        if (xgene_8250_erratum_present(table))
                iotype = "mmio32";
 
index b9f907eedbf770ee32359468d2b8d07e57bde667..bfbe1e15412889dfb40e699af7c3623a06d83253 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
-#include <linux/swait.h>
 
 #include <generated/utsrelease.h>
 
@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
  * state of the firmware loading.
  */
 struct fw_state {
-       struct swait_queue_head wq;
+       struct completion completion;
        enum fw_status status;
 };
 
 static void fw_state_init(struct fw_state *fw_st)
 {
-       init_swait_queue_head(&fw_st->wq);
+       init_completion(&fw_st->completion);
        fw_st->status = FW_STATUS_UNKNOWN;
 }
 
@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
 {
        long ret;
 
-       ret = swait_event_interruptible_timeout(fw_st->wq,
-                               __fw_state_is_done(READ_ONCE(fw_st->status)),
-                               timeout);
+       ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
        if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
                return -ENOENT;
        if (!ret)
@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
        WRITE_ONCE(fw_st->status, status);
 
        if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
-               swake_up(&fw_st->wq);
+               complete_all(&fw_st->completion);
 }
 
 #define fw_state_start(fw_st)                                  \
        __fw_state_set(fw_st, FW_STATUS_LOADING)
 #define fw_state_done(fw_st)                                   \
        __fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st)                                        \
+       __fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait(fw_st)                                   \
        __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
 
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st)     false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
 {
        return fw_st->status == status;
 }
 
+#define fw_state_is_aborted(fw_st)                             \
+       __fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
 #define fw_state_aborted(fw_st)                                        \
        __fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_is_done(fw_st)                                        \
        __fw_state_check(fw_st, FW_STATUS_DONE)
 #define fw_state_is_loading(fw_st)                             \
        __fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st)                             \
-       __fw_state_check(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait_timeout(fw_st, timeout)                  \
        __fw_state_wait_common(fw_st, timeout)
 
@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
        return 1; /* need to load */
 }
 
+/*
+ * Batched requests need only one wake; this step must be done last because
+ * of the fallback mechanism. The buf is protected with kref_get(), and it
+ * won't be released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well; in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+       struct firmware_buf *buf;
+
+       /* Loaded directly? */
+       if (!fw || !fw->priv)
+               return;
+
+       buf = fw->priv;
+       if (!fw_state_is_aborted(&buf->fw_st))
+               fw_state_aborted(&buf->fw_st);
+}
+
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 
  out:
        if (ret < 0) {
+               fw_abort_batch_reqs(fw);
                release_firmware(fw);
                fw = NULL;
        }
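
A completion fits here because it is one-shot and sticky: complete_all() releases every current and future waiter, which is exactly the batched-request semantic the swait queue was approximating. A small userspace model using C11 threads.h in place of the kernel primitives, as an illustration rather than the kernel implementation:

    #include <stdbool.h>
    #include <stdio.h>
    #include <threads.h>

    struct completion_model {
            mtx_t lock;
            cnd_t cond;
            bool done;
    };

    static void init_completion(struct completion_model *c)
    {
            mtx_init(&c->lock, mtx_plain);
            cnd_init(&c->cond);
            c->done = false;
    }

    static void complete_all(struct completion_model *c)
    {
            mtx_lock(&c->lock);
            c->done = true;                 /* sticky for late waiters */
            cnd_broadcast(&c->cond);        /* wake the whole batch */
            mtx_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion_model *c)
    {
            mtx_lock(&c->lock);
            while (!c->done)
                    cnd_wait(&c->cond, &c->lock);
            mtx_unlock(&c->lock);
    }

    int main(void)
    {
            struct completion_model c;

            init_completion(&c);
            complete_all(&c);
            wait_for_completion(&c);        /* returns immediately */
            puts("done");
            return 0;
    }
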
index 6b16ead1da5871abcef5b2233733f281158596a8..ad9749463d4fa9a382afa7f24587bbbe3a2efcc9 100644 (file)
@@ -875,6 +875,56 @@ static void print_version(void)
                printk(KERN_INFO "%s", version);
 }
 
+struct vdc_check_port_data {
+       int     dev_no;
+       char    *type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+       struct vio_dev *vdev = to_vio_dev(dev);
+       struct vdc_check_port_data *port_data;
+
+       port_data = (struct vdc_check_port_data *)arg;
+
+       /* Match a vdc-port that vdc_port_probe() has already configured. */
+       if (vdev->dev_no == port_data->dev_no &&
+           !strcmp((char *)&vdev->type, port_data->type) &&
+           dev_get_drvdata(dev))
+               return 1;
+
+       return 0;
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+       struct vdc_check_port_data port_data;
+       struct device *dev;
+
+       port_data.dev_no = vdev->dev_no;
+       port_data.type = (char *)&vdev->type;
+
+       dev = device_find_child(vdev->dev.parent, &port_data,
+                               vdc_device_probed);
+
+       if (dev)
+               return true;
+
+       return false;
+}
+
 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
        struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                goto err_out_release_mdesc;
        }
 
+       /* Check if this device is part of an mpgroup */
+       if (vdc_port_mpgroup_check(vdev)) {
+               printk(KERN_WARNING
+                       "VIO: Ignoring extra vdisk port %s\n",
+                       dev_name(&vdev->dev));
+               goto err_out_release_mdesc;
+       }
+
        port = kzalloc(sizeof(*port), GFP_KERNEL);
        err = -ENOMEM;
        if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        if (err)
                goto err_out_free_tx_ring;
 
+       /* Note that the device driver_data is used to determine
+        * whether the port has been probed.
+        */
        dev_set_drvdata(&vdev->dev, port);
 
        mdesc_release(hp);
index 98e34e4c62b8b228a6ee6d37256e860ee7d3c2b7..2468c28d477110e6bcb2cbf5225dd481c6ec5b95 100644 (file)
@@ -2075,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev)
                        /*
                         * Get the bios in the request so we can re-queue them.
                         */
-                       if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
-                           req_op(shadow[i].request) == REQ_OP_DISCARD ||
-                           req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+                       if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+                           req_op(shadow[j].request) == REQ_OP_DISCARD ||
+                           req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
                            shadow[j].request->cmd_flags & REQ_FUA) {
                                /*
                                 * Flush operations don't contain bios, so
index 856d5dc02451d44b59695127994017877cd02b38..3b1b6340ba13a2977ffd0a13424ce95322f67f0e 100644 (file)
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
        struct zram *zram = dev_to_zram(dev);
-       char compressor[CRYPTO_MAX_ALG_NAME];
+       char compressor[ARRAY_SIZE(zram->compressor)];
        size_t sz;
 
        strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
                return -EBUSY;
        }
 
-       strlcpy(zram->compressor, compressor, sizeof(compressor));
+       strcpy(zram->compressor, compressor);
        up_write(&zram->init_lock);
        return len;
 }
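
The underlying rule in this fix: size a staging buffer from the destination, not from some global maximum such as CRYPTO_MAX_ALG_NAME, so the final copy can never overflow. A hypothetical demo, with strlcpy() emulated since it is not part of standard C:

    #include <stdio.h>
    #include <string.h>

    struct dev_model { char name[16]; };

    static void set_name(struct dev_model *d, const char *buf)
    {
            /* sized like ARRAY_SIZE(zram->compressor), i.e. by the dest */
            char staging[sizeof(d->name)];

            strncpy(staging, buf, sizeof(staging) - 1);   /* ~strlcpy() */
            staging[sizeof(staging) - 1] = '\0';
            strcpy(d->name, staging);   /* safe: same bound as the dest */
    }

    int main(void)
    {
            struct dev_model d;

            set_name(&d, "lz4");
            printf("%s\n", d.name);
            return 0;
    }
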
index fcae5ca6ac9234debecf5a4f84521405aa64a74c..54a67f8a28ebfb929610ea07697d7c7e4eb33de1 100644 (file)
@@ -262,7 +262,7 @@ config CLKSRC_LPC32XX
 
 config CLKSRC_PISTACHIO
        bool "Clocksource for Pistachio SoC" if COMPILE_TEST
-       depends on HAS_IOMEM
+       depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
        select TIMER_OF
        help
          Enables the clocksource for the Pistachio SoC.
index aae87c4c546ee06203195654087cd9ef8b43d57a..72bbfccef1132c3a962d9e93d4bd6c459f1b368d 100644 (file)
@@ -1440,7 +1440,7 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count)
         * While unlikely, it's theoretically possible that none of the frames
         * in a timer expose the combination of feature we want.
         */
-       for (i = i; i < timer_count; i++) {
+       for (i = 0; i < timer_count; i++) {
                timer = &timers[i];
 
                frame = arch_timer_mem_find_best_frame(timer);
index bc48cbf6a7957198d4ecd6bf91840df350843d48..269db74a065815e2211194fbda9774dcf1e80041 100644 (file)
@@ -305,7 +305,7 @@ static int em_sti_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get irq\n");
-               return -EINVAL;
+               return irq;
        }
 
        /* map memory, let base point to the STI instance */
@@ -314,11 +314,12 @@ static int em_sti_probe(struct platform_device *pdev)
        if (IS_ERR(p->base))
                return PTR_ERR(p->base);
 
-       if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
-                            IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
-                            dev_name(&pdev->dev), p)) {
+       ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
+                              IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+                              dev_name(&pdev->dev), p);
+       if (ret) {
                dev_err(&pdev->dev, "failed to request low IRQ\n");
-               return -ENOENT;
+               return ret;
        }
 
        /* get hold of clock */
index d509b500a7b5f8565514352a0313cb4704976042..4d7aef9d9c15422df0e9973e72379b2e21256f47 100644 (file)
@@ -128,9 +128,9 @@ static __init int timer_base_init(struct device_node *np,
        const char *name = of_base->name ? of_base->name : np->full_name;
 
        of_base->base = of_io_request_and_map(np, of_base->index, name);
-       if (!of_base->base) {
+       if (IS_ERR(of_base->base)) {
                pr_err("Failed to iomap (%s)\n", name);
-               return -ENXIO;
+               return PTR_ERR(of_base->base);
        }
 
        return 0;
index 0566455f233ed3cd663506321f08f7ae546862be..65ee4fcace1f260632cf53c150c3260cc27240ad 100644 (file)
@@ -1613,8 +1613,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-       return mul_ext_fp(cpu->sample.core_avg_perf,
-                         cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+       return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
 }
 
 static inline int32_t get_avg_pstate(struct cpudata *cpu)
index 37b0698b7193e60be4107a8be107285e55c65877..42896a67aeae38325cbda2acb7ca655c1b43915c 100644 (file)
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
        return -1;
 }
 
+extern u32 pnv_get_supported_cpuidle_states(void);
 static int powernv_add_idle_states(void)
 {
        struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
        const char *names[CPUIDLE_STATE_MAX];
        u32 has_stop_states = 0;
        int i, rc;
+       u32 supported_flags = pnv_get_supported_cpuidle_states();
+
 
        /* Currently we have snooze statically defined */
 
@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
        for (i = 0; i < dt_idle_states; i++) {
                unsigned int exit_latency, target_residency;
                bool stops_timebase = false;
+
+               /*
+                * Skip the platform idle state whose flag isn't in
+                * the supported_cpuidle_states flag mask.
+                */
+               if ((flags[i] & supported_flags) != flags[i])
+                       continue;
                /*
                 * If an idle state has exit latency beyond
                 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
index 427cbe01272926acd38eba211dceb28fec61db64..dadc4a808df5a37764adf45df9f90dc1fce5b03a 100644 (file)
@@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
                req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
                                &crypt->icv_rev_aes);
                if (unlikely(!req_ctx->hmac_virt))
-                       goto free_buf_src;
+                       goto free_buf_dst;
                if (!encrypt) {
                        scatterwalk_map_and_copy(req_ctx->hmac_virt,
                                req->src, cryptlen, authsize, 0);
@@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
        BUG_ON(qmgr_stat_overflow(SEND_QID));
        return -EINPROGRESS;
 
-free_buf_src:
-       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 free_buf_dst:
        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
        crypt->ctl_flags = CTL_FLAG_UNUSED;
        return -ENOMEM;
 }
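
The label swap restores the usual inverted-unwind rule: error labels release resources in reverse order of acquisition, so a failed hmac allocation frees the dst chain, acquired after src, first. A generic sketch of the idiom with hypothetical resources:

    #include <stdlib.h>

    static int setup_model(void)
    {
            void *src, *dst, *hmac;

            src = malloc(16);
            if (!src)
                    return -1;
            dst = malloc(16);
            if (!dst)
                    goto free_src;
            hmac = malloc(16);
            if (!hmac)
                    goto free_dst;  /* dst came after src: freed first */

            free(hmac);
            free(dst);
            free(src);
            return 0;               /* success path */

    free_dst:
            free(dst);
    free_src:
            free(src);
            return -1;
    }

    int main(void) { return setup_model() ? 1 : 0; }
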
index d7e219d2669daf01c935bd18c0836cb88a34e075..66fb40d0ebdbbec521499cd58cf2e1d55c195878 100644 (file)
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
 {
        struct sync_file *sync_file = file->private_data;
 
-       if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+       if (test_bit(POLL_ENABLED, &sync_file->flags))
                dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
        dma_fence_put(sync_file->fence);
        kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &sync_file->wq, wait);
 
-       if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+       if (list_empty(&sync_file->cb.node) &&
+           !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
                if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
                                           fence_check_cb_func) < 0)
                        wake_up_all(&sync_file->wq);
index a6899180b265721831837282e6c66d12a8bccf48..c586f44312f9772fb93f8b1442827c842d610594 100644 (file)
@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                struct dma_fence *f = e->fence;
                struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
+               if (dma_fence_is_signaled(f)) {
+                       hash_del(&e->node);
+                       dma_fence_put(f);
+                       kmem_cache_free(amdgpu_sync_slab, e);
+                       continue;
+               }
                if (ring && s_fence) {
                        /* For fences from the same ring it is sufficient
                         * when they are scheduled.
@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                        }
                }
 
-               if (dma_fence_is_signaled(f)) {
-                       hash_del(&e->node);
-                       dma_fence_put(f);
-                       kmem_cache_free(amdgpu_sync_slab, e);
-                       continue;
-               }
-
                return f;
        }
 
index 5c26488e7a2d7a0320ddf321375b8ff4c200185f..0529e500c5341ed5e0d93bb658cd42d9288b13ad 100644 (file)
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        /* port@2 is the output port */
        ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
-       if (ret)
+       if (ret && ret != -ENODEV)
                return ret;
 
        /* Shut down GPIO is optional */
index 5bd93169dac2059a0981cc2f24b8c9032447ba9e..6463fc2c736fd4db5881a259b21848328b7f6cea 100644 (file)
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
                if (ret)
                        return ret;
 
-               if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
-                       DRM_ERROR("relocation %u outside object", i);
+               if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+                       DRM_ERROR("relocation %u outside object\n", i);
                        return -EINVAL;
                }
 
index d48fd7c918f880df0b3a27da5e8fa4f09c011b04..73217c281c9a87e51ac2a3d8ddf235b227ba2a1d 100644 (file)
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                      const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
        struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
        struct drm_gem_object *obj;
        struct drm_framebuffer *fb;
        int i;
        int ret;
 
-       for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+       for (i = 0; i < info->num_planes; i++) {
+               unsigned int height = (i == 0) ? mode_cmd->height :
+                                    DIV_ROUND_UP(mode_cmd->height, info->vsub);
+               unsigned long size = height * mode_cmd->pitches[i] +
+                                    mode_cmd->offsets[i];
+
                obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
                if (!obj) {
                        DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                }
 
                exynos_gem[i] = to_exynos_gem(obj);
+
+               if (size > exynos_gem[i]->size) {
+                       i++;
+                       ret = -EINVAL;
+                       goto err;
+               }
        }
 
        fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
index 700050556242480e6fbf8eb4a8d97c6307e9390d..1648887d3f55248cf055524a2f0e341062f0cd8d 100644 (file)
@@ -46,6 +46,8 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
                ((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
        [RCS] = RCS_AS_CONTEXT_SWITCH,
        [BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
-       struct intel_vgpu_execlist *execlist =
-               &vgpu->execlist[workload->ring_id];
+       int ring_id = workload->ring_id;
+       struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
        struct intel_vgpu_workload *next_workload;
-       struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+       struct list_head *next = workload_q_head(vgpu, ring_id)->next;
        bool lite_restore = false;
        int ret;
 
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
        release_shadow_batch_buffer(workload);
        release_shadow_wa_ctx(&workload->wa_ctx);
 
-       if (workload->status || vgpu->resetting)
+       if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+               /* A non-zero workload->status means the HW GPU hung or
+                * something went wrong in i915/GVT, and GVT won't inject
+                * a context switch interrupt into the guest. To the
+                * guest, this error is effectively a vGPU hang, so we
+                * should emulate one. If there are pending workloads
+                * already submitted by the guest, we should clean them
+                * up the way the HW GPU does.
+                *
+                * If we are in the middle of an engine reset, the
+                * pending workloads won't be submitted to the HW GPU and
+                * will be cleaned up later during the reset, so the
+                * workload cleanup here has no impact.
+                */
+               clean_workloads(vgpu, ENGINE_MASK(ring_id));
                goto out;
+       }
 
-       if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+       if (!list_empty(workload_q_head(vgpu, ring_id))) {
                struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
                next_workload = container_of(next,
index 5dad9298b2d5dbbe7b626895806e6008047bbd6a..a26c1705430eb2134d002b68ddcb26d272684bd9 100644 (file)
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        struct intel_gvt_device_info *info = &gvt->device_info;
        struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
        struct intel_gvt_mmio_info *e;
+       struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+       int num = gvt->mmio.num_mmio_block;
        struct gvt_firmware_header *h;
        void *firmware;
        void *p;
        unsigned long size, crc32_start;
-       int i;
+       int i, j;
        int ret;
 
        size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
                *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
+       for (i = 0; i < num; i++, block++) {
+               for (j = 0; j < block->size; j += 4)
+                       *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+                               I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+                                                       block->offset) + j));
+       }
+
        memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
        crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
index 3a74e79eac2f6c13fef32e1611b539db7b8f46c3..2964a4d01a66da5d2fb06d256ed35fa83fa96a38 100644 (file)
@@ -149,7 +149,7 @@ struct intel_vgpu {
        bool active;
        bool pv_notified;
        bool failsafe;
-       bool resetting;
+       unsigned int resetting_eng;
        void *sched_data;
        struct vgpu_sched_ctl sched_ctl;
 
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
        unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+       unsigned int device;
+       i915_reg_t   offset;
+       unsigned int size;
+       gvt_mmio_func read;
+       gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN      (1 << 6)
 
+       struct gvt_mmio_block *mmio_block;
+       unsigned int num_mmio_block;
+
        DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
        unsigned int num_tracked_mmio;
 };
index 17febe830ff6984e06bb81cb91601a76b67d5f2a..feed9921b3b3eb05e6e8dce5e1b510f4d6fc9479 100644 (file)
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-       unsigned int device;
-       i915_reg_t   offset;
-       unsigned int size;
-       gvt_mmio_func read;
-       gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-       {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-       {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-       {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-               pvinfo_mmio_read, pvinfo_mmio_write},
-       {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-       {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-       {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
                                              unsigned int offset)
 {
        unsigned long device = intel_gvt_get_device_type(gvt);
-       struct gvt_mmio_block *block = gvt_mmio_blocks;
+       struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+       int num = gvt->mmio.num_mmio_block;
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+       for (i = 0; i < num; i++, block++) {
                if (!(device & block->device))
                        continue;
                if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
        gvt->mmio.mmio_attribute = NULL;
 }
 
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+       {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+       {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+       {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+               pvinfo_mmio_read, pvinfo_mmio_write},
+       {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+       {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+       {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
                        goto err;
        }
 
+       gvt->mmio.mmio_block = mmio_blocks;
+       gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
        gvt_dbg_mmio("traced %u virtual mmio registers\n",
                     gvt->mmio.num_tracked_mmio);
        return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
        gvt_mmio_func func;
        int ret;
 
-       if (WARN_ON(bytes > 4))
+       if (WARN_ON(bytes > 8))
                return -EINVAL;
 
        /*
index 4f7057d62d88b393ce77670f9100bc2d3b246014..22e08eb2d0b7c66faf01741656fb33d1535925f3 100644 (file)
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
                i915_gem_request_put(fetch_and_zero(&workload->req));
 
-               if (!workload->status && !vgpu->resetting) {
+               if (!workload->status && !(vgpu->resetting_eng &
+                                          ENGINE_MASK(ring_id))) {
                        update_guest_context(workload);
 
                        for_each_set_bit(event, workload->pending_events,
index 90c14e6e3ea06b8de36d90284132659eb80f72c6..3deadcbd5a245c039169f1a10c6c91cc791d3a66 100644 (file)
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+       unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
        gvt_dbg_core("------------------------------------------\n");
        gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
                     vgpu->id, dmlr, engine_mask);
-       vgpu->resetting = true;
+
+       vgpu->resetting_eng = resetting_eng;
 
        intel_vgpu_stop_schedule(vgpu);
        /*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                mutex_lock(&gvt->lock);
        }
 
-       intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+       intel_vgpu_reset_execlist(vgpu, resetting_eng);
 
        /* full GPU reset or device model level reset */
        if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                }
        }
 
-       vgpu->resetting = false;
+       vgpu->resetting_eng = 0;
        gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
        gvt_dbg_core("------------------------------------------\n");
 }
index 00d8967c8512048f5fb46f629c9c76ace1161bbf..d1bd53b73738446f5d01cc7aae2b585f675de6b7 100644 (file)
@@ -4580,7 +4580,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 
                sseu->slice_mask |= BIT(s);
 
-               if (IS_GEN9_BC(dev_priv))
+               if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
                        sseu->subslice_mask =
                                INTEL_INFO(dev_priv)->sseu.subslice_mask;
 
index 39ed58a21fc1f517f56d179d48b3b9bef00e9410..e1e971ee2ed57aae59dd505a49e8a6ee546e58ff 100644 (file)
@@ -688,19 +688,19 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static bool
-needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
-                 struct intel_engine_cs *engine,
-                 struct i915_gem_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
 {
+       struct i915_gem_context *from = engine->legacy_active_context;
+
        if (!ppgtt)
                return false;
 
        /* Always load the ppgtt on first use */
-       if (!engine->legacy_active_context)
+       if (!from)
                return true;
 
        /* Same context without new entries, skip */
-       if (engine->legacy_active_context == to &&
+       if ((!from->ppgtt || from->ppgtt == ppgtt) &&
            !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
                return false;
 
@@ -744,7 +744,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
        if (skip_rcs_switch(ppgtt, engine, to))
                return 0;
 
-       if (needs_pd_load_pre(ppgtt, engine, to)) {
+       if (needs_pd_load_pre(ppgtt, engine)) {
                /* Older GENs and non render rings still want the load first,
                 * "PP_DCLV followed by PP_DIR_BASE register through Load
                 * Register Immediate commands in Ring Buffer before submitting
@@ -841,7 +841,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
                struct i915_hw_ppgtt *ppgtt =
                        to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
 
-               if (needs_pd_load_pre(ppgtt, engine, to)) {
+               if (needs_pd_load_pre(ppgtt, engine)) {
                        int ret;
 
                        trace_switch_mm(engine, to);
@@ -852,6 +852,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
                        ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
                }
 
+               engine->legacy_active_context = to;
                return 0;
        }
 
index 7032c542a9b1d007c4bc69e328db040b88efc15d..4dd4c2159a92e26d91fd98e560c587a91b964e48 100644 (file)
@@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
                        goto err_unpin;
        }
 
+       ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
+       if (ret)
+               goto err_unpin;
+
        ret = req->engine->emit_bb_start(req,
                                         so->batch_offset, so->batch_size,
                                         I915_DISPATCH_SECURE);
index 1032f98add112a66a19fb186a2b28de773caadf8..77fb3980813143d2d9e3432c0ebb994a4bcad032 100644 (file)
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
                return true;
 
        case MUTEX_TRYLOCK_FAILED:
+               *unlock = false;
+               preempt_disable();
                do {
                        cpu_relax();
                        if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-       case MUTEX_TRYLOCK_SUCCESS:
                                *unlock = true;
-                               return true;
+                               break;
                        }
                } while (!need_resched());
+               preempt_enable();
+               return *unlock;
 
-               return false;
+       case MUTEX_TRYLOCK_SUCCESS:
+               *unlock = true;
+               return true;
        }
 
        BUG();
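The restructured shrinker_lock() replaces the case label buried inside the do/while with a straight loop: disable preemption, spin on mutex_trylock() with cpu_relax(), and give up as soon as the scheduler wants the CPU back. The same pattern in isolation, as a self-contained sketch:

    /* Briefly spin for a contended mutex; bail out once preemption is due. */
    static bool mutex_trylock_brief(struct mutex *lock)
    {
            bool locked = false;

            preempt_disable();
            do {
                    cpu_relax();
                    if (mutex_trylock(lock)) {
                            locked = true;
                            break;
                    }
            } while (!need_resched());
            preempt_enable();

            return locked;
    }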
index 9cd22f83b0cfaee680ed06c5bde67db6fc89d0fa..f33d90226704108e71ee5662e01977e32b627fcb 100644 (file)
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
        u32 *cs;
        int i;
 
-       cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+       cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
-       *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+       *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
 
        *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
        *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
index 306c6b06b330bfc57f75a992c60468cb9d88e81c..17c4ae7e4e7c51e85de97cb8c803b280115de8cf 100644 (file)
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
                }
 
                /* Program the max register to clamp values > 1.0. */
+               i = lut_size - 1;
                I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
                           drm_color_lut_extract(lut[i].red, 16));
                I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
index 9edeaaef77adf6c38e23733769e271147698f02a..d3b3252a874252641af555d8797a72e20056a34b 100644 (file)
@@ -1762,7 +1762,7 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
        if (dev_priv->vbt.edp.low_vswing) {
                if (voltage == VOLTAGE_INFO_0_85V) {
                        *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
-                       return cnl_ddi_translations_dp_0_85V;
+                       return cnl_ddi_translations_edp_0_85V;
                } else if (voltage == VOLTAGE_INFO_0_95V) {
                        *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
                        return cnl_ddi_translations_edp_0_95V;
index 9471c88d449eaf84adf3411d61135bf090be9b96..cc484b56eeaa33213a0832f43770132a1b165df1 100644 (file)
@@ -3485,6 +3485,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
            !gpu_reset_clobbers_display(dev_priv))
                return;
 
+       /* We have a modeset vs reset deadlock, defensively unbreak it.
+        *
+        * FIXME: We can do a _lot_ better, this is just a first iteration.
+        */
+       i915_gem_set_wedged(dev_priv);
+       DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n");
+
        /*
         * Need mode_config.mutex so that we don't
         * trample ongoing ->detect() and whatnot.
index 52b3a1fd4059bab91a898f7f498a79af2c117a4b..57ef5833c4274958f3eed9f39b66a598dcfd1cd7 100644 (file)
@@ -63,7 +63,6 @@ enum {
 };
 
 /* Logical Rings */
-void intel_logical_ring_stop(struct intel_engine_cs *engine);
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
 int logical_render_ring_init(struct intel_engine_cs *engine);
 int logical_xcs_ring_init(struct intel_engine_cs *engine);
index 96c2cbd81869e7e55dedc8ce09f53938466bee70..593349be8b9dfce328d20ea3ca5d1b22e41da320 100644 (file)
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 
        if (i915.invert_brightness > 0 ||
            dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-               return panel->backlight.max - val;
+               return panel->backlight.max - val + panel->backlight.min;
        }
 
        return val;
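The old inverted value `max - val` could drop below the panel's minimum duty cycle whenever `val` approached `max`. Adding `min` mirrors `val` within [min, max] instead. A quick worked check with hypothetical limits min = 10, max = 100:

    val = 100 (user full)  ->  100 - 100 + 10 = 10   (hardware minimum, not 0)
    val =  10 (user dim)   ->  100 -  10 + 10 = 100  (hardware maximum)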
index b638d192ce5e046cc29e87459cc36e988e498ea8..99d39b2aefa675941d42c86b3c9b5a4d2cda937b 100644 (file)
@@ -5,7 +5,7 @@ config DRM_MSM
        depends on ARCH_QCOM || (ARM && COMPILE_TEST)
        depends on OF && COMMON_CLK
        depends on MMU
-       select QCOM_MDT_LOADER
+       select QCOM_MDT_LOADER if ARCH_QCOM
        select REGULATOR
        select DRM_KMS_HELPER
        select DRM_PANEL
index b4b54f1c24bc1995a032493838d99e8e31dff9e9..f9eae03aa1dcaef072974d60216fb6b09ef81e66 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/qcom_scm.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
 #include <linux/soc/qcom/mdt_loader.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
 static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 {
        const struct firmware *fw;
+       struct device_node *np;
+       struct resource r;
        phys_addr_t mem_phys;
        ssize_t mem_size;
        void *mem_region = NULL;
        int ret;
 
+       if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+               return -EINVAL;
+
+       np = of_get_child_by_name(dev->of_node, "zap-shader");
+       if (!np)
+               return -ENODEV;
+
+       np = of_parse_phandle(np, "memory-region", 0);
+       if (!np)
+               return -EINVAL;
+
+       ret = of_address_to_resource(np, 0, &r);
+       if (ret)
+               return ret;
+
+       mem_phys = r.start;
+       mem_size = resource_size(&r);
+
        /* Request the MDT file for the firmware */
        ret = request_firmware(&fw, fwname, dev);
        if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
        }
 
        /* Allocate memory for the firmware image */
-       mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+       mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
        if (!mem_region) {
                ret = -ENOMEM;
                goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
                DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
 
 out:
+       if (mem_region)
+               memunmap(mem_region);
+
        release_firmware(fw);
 
        return ret;
 }
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
-       return -ENODEV;
-}
-#endif
 
 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        gpu->funcs->flush(gpu);
 }
 
-struct a5xx_hwcg {
+static const struct {
        u32 offset;
        u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
 };
 
-static const struct {
-       int (*test)(struct adreno_gpu *gpu);
-       const struct a5xx_hwcg *regs;
-       unsigned int count;
-} a5xx_hwcg_regs[] = {
-       { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
-               const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
 {
        unsigned int i;
 
-       for (i = 0; i < count; i++)
-               gpu_write(gpu, regs[i].offset, regs[i].value);
+       for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+               gpu_write(gpu, a5xx_hwcg[i].offset,
+                       state ? a5xx_hwcg[i].value : 0);
 
-       gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
-       gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
-       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
-               if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
-                       _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
-                               a5xx_hwcg_regs[i].count);
-                       return;
-               }
-       }
+       gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+       gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
 }
 
 static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
        return ret;
 }
 
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
-       struct device_node *node;
-       int ret;
-
-       if (dev->parent)
-               return 0;
-
-       /* Find the sub-node for the zap shader */
-       node = of_get_child_by_name(parent->of_node, "zap-shader");
-       if (!node) {
-               DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
-               return -ENODEV;
-       }
-
-       dev->parent = parent;
-       dev->of_node = node;
-       dev_set_name(dev, "adreno_zap_shader");
-
-       ret = device_register(dev);
-       if (ret) {
-               DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
-               goto out;
-       }
-
-       ret = of_reserved_mem_device_init(dev);
-       if (ret) {
-               DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
-               device_unregister(dev);
-       }
-
-out:
-       if (ret)
-               dev->parent = NULL;
-
-       return ret;
-}
-
 static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 {
        static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
                return -ENODEV;
        }
 
-       ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
-       if (!ret)
-               ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
-                       adreno_gpu->info->zapfw);
+       ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
 
        loaded = !ret;
 
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
 
        /* Enable HWCG */
-       a5xx_enable_hwcg(gpu);
+       a5xx_set_hwcg(gpu, true);
 
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
 
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 
        DBG("%s", gpu->name);
 
-       if (a5xx_gpu->zap_dev.parent)
-               device_unregister(&a5xx_gpu->zap_dev);
-
        if (a5xx_gpu->pm4_bo) {
                if (a5xx_gpu->pm4_iova)
                        msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
        0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
        0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
        0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
-       0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
-       0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
-       0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
-       0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
-       0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
-       0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
-       0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
-       0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
-       0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
-       0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
-       0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
-       0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
-       0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
-       0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
-       0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
-       0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
-       0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
-       0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
-       0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
-       0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
-       0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
-       0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
-       0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
-       0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
-       ~0
+       0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+       0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+       0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+       0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+       0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+       0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+       0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+       0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+       0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+       0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+       0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+       0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+       0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+       0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+       0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+       0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+       0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+       0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+       0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+       0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+       0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+       0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+       0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+       0xB9A0, 0xB9BF, ~0
 };
 
 static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
        seq_printf(m, "status:   %08x\n",
                        gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+       /*
+        * Temporarily disable hardware clock gating before going into
+        * adreno_show to avoid issues while reading the registers
+        */
+       a5xx_set_hwcg(gpu, false);
        adreno_show(gpu, m);
+       a5xx_set_hwcg(gpu, true);
 }
 #endif
 
index 6638bc85645dbad4adf3689bd7d9bae9441173c2..1137092241d593c34e4607e3c723acfb74861972 100644 (file)
@@ -36,8 +36,6 @@ struct a5xx_gpu {
        uint32_t gpmu_dwords;
 
        uint32_t lm_leakage;
-
-       struct device zap_dev;
 };
 
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
 }
 
 bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
 
 #endif /* __A5XX_GPU_H__ */
index f1ab2703674a2f5d4f533828bb6c8b49df24f571..7414c6bbd582e9597e502305885f0dec909859be 100644 (file)
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
                *value = adreno_gpu->base.fast_rate;
                return 0;
        case MSM_PARAM_TIMESTAMP:
-               if (adreno_gpu->funcs->get_timestamp)
-                       return adreno_gpu->funcs->get_timestamp(gpu, value);
+               if (adreno_gpu->funcs->get_timestamp) {
+                       int ret;
+
+                       pm_runtime_get_sync(&gpu->pdev->dev);
+                       ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+                       pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+                       return ret;
+               }
                return -EINVAL;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
index 9e9c5696bc03547b813ecf2ae56c535265e64bcd..c7b612c3d7717a02d8d64be21dea68f183163917 100644 (file)
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
        struct msm_dsi_phy_clk_request *clk_req)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+       int ret;
+
+       ret = dsi_calc_clk_rate(msm_host);
+       if (ret) {
+               pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+               return;
+       }
 
        clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
        clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
                                        struct drm_display_mode *mode)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-       int ret;
 
        if (msm_host->mode) {
                drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
                return -ENOMEM;
        }
 
-       ret = dsi_calc_clk_rate(msm_host);
-       if (ret) {
-               pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
-               return ret;
-       }
-
        return 0;
 }
 
index cb5415d6c04b7ab6e1e80503d26b32891a934dee..735a87a699fafafb99b179752e0e7f3c19491389 100644 (file)
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
        unsigned long flags;
-       enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
-       enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+       enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+       enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
        int i, plane_cnt = 0;
        bool bg_alpha_enabled = false;
        u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        if (!handle) {
                DBG("Cursor off");
                cursor_enable = false;
+               mdp5_enable(mdp5_kms);
                goto set_cursor;
        }
 
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
        get_roi(crtc, &roi_w, &roi_h);
 
+       mdp5_enable(mdp5_kms);
+
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
                        MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ set_cursor:
        crtc_flush(crtc, flush_mask);
 
 end:
+       mdp5_disable(mdp5_kms);
        if (old_bo) {
                drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
                /* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
        get_roi(crtc, &roi_w, &roi_h);
 
+       mdp5_enable(mdp5_kms);
+
        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
                        MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
        crtc_flush(crtc, flush_mask);
 
+       mdp5_disable(mdp5_kms);
+
        return 0;
 }
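Both cursor entry points now bracket their register writes with mdp5_enable()/mdp5_disable(), including the cursor-off path, because every exit funnels through the `end:` label where mdp5_disable() lives. The invariant, condensed into a sketch with a hypothetical helper:

    static int cursor_update(struct mdp5_kms *mdp5_kms, bool off)
    {
            mdp5_enable(mdp5_kms);             /* clocks must be on for MDP5 writes */

            if (!off)
                    program_cursor(mdp5_kms);  /* hypothetical register writes */

            /* single exit: every path reaching here called mdp5_enable() once */
            mdp5_disable(mdp5_kms);
            return 0;
    }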
 
index 97f3294fbfc6f9d26f453dac36d5dbb3cb00e93a..70bef51245af89d5bbb292f4495f2fd79dcc9e33 100644 (file)
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
        struct mdp5_interface *intf = mdp5_encoder->intf;
 
        if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-               mdp5_cmd_encoder_disable(encoder);
+               mdp5_cmd_encoder_enable(encoder);
        else
                mdp5_vid_encoder_enable(encoder);
 }
index 5d13fa5381ee37705a0c282bf023b4782fc19268..1c603aef3c59cdff286ce38e84c3e6a4745dd0c2 100644 (file)
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
                const char *name, bool mandatory)
 {
        struct device *dev = &pdev->dev;
-       struct clk *clk = devm_clk_get(dev, name);
+       struct clk *clk = msm_clk_get(pdev, name);
        if (IS_ERR(clk) && mandatory) {
                dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
                return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
        }
 
        /* mandatory clocks: */
-       ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
        if (ret)
                goto fail;
-       ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
        if (ret)
                goto fail;
-       ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
        if (ret)
                goto fail;
-       ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+       ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
        if (ret)
                goto fail;
 
        /* optional clocks: */
-       get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+       get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
 
        /* we need to set a default rate before enabling.  Set a safe
         * rate first, then figure out hw revision, and then set a
index fe3a4de1a4331ff86f0b4f0cc85a48b208bca3b5..61f39c86dd09e53a5860ce880b0f7955e5008c05 100644 (file)
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
        struct mdp5_hw_pipe *right_hwpipe;
        const struct mdp_format *format;
        uint32_t nplanes, config = 0;
-       struct phase_step step = { 0 };
-       struct pixel_ext pe = { 0 };
+       struct phase_step step = { { 0 } };
+       struct pixel_ext pe = { { 0 } };
        uint32_t hdecm = 0, vdecm = 0;
        uint32_t pix_format;
        unsigned int rotation;
index 65f35544c1ec8859018c2afb713fa5120fc43272..a0c60e738db8d7be5e841311832e82f0b45bd7fa 100644 (file)
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
                struct page **pages;
 
                vma = add_vma(obj, aspace);
-               if (IS_ERR(vma))
-                       return PTR_ERR(vma);
+               if (IS_ERR(vma)) {
+                       ret = PTR_ERR(vma);
+                       goto unlock;
+               }
 
                pages = get_pages(obj);
                if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 
 fail:
        del_vma(vma);
-
+unlock:
        mutex_unlock(&msm_obj->lock);
        return ret;
 }
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
        if (use_vram) {
                struct msm_gem_vma *vma;
                struct page **pages;
+               struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+               mutex_lock(&msm_obj->lock);
 
                vma = add_vma(obj, NULL);
+               mutex_unlock(&msm_obj->lock);
                if (IS_ERR(vma)) {
                        ret = PTR_ERR(vma);
                        goto fail;
index 6bfca74701410050b20d1a136ae5cdc4454b1306..8a75c0bd8a78b1481e30fdab63f2d14bfc64536d 100644 (file)
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
                struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
 {
        struct msm_gem_submit *submit;
-       uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
-               (nr_cmds * sizeof(submit->cmd[0]));
+       uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+               ((u64)nr_cmds * sizeof(submit->cmd[0]));
 
        if (sz > SIZE_MAX)
                return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (ret)
                goto out;
 
-       if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+       if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
                ret = submit_fence_sync(submit);
                if (ret)
                        goto out;
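`nr_bos` and `nr_cmds` are u32 values supplied by userspace, so on a 32-bit kernel the old `nr_bos * sizeof(submit->bos[0])` multiply could wrap before the `sz > SIZE_MAX` check ever ran. Casting one operand to u64 makes both the multiply and the comparison 64-bit. A worked example, assuming for illustration that sizeof(submit->bos[0]) == 32:

    nr_bos = 0x08000000
    32-bit multiply: 0x08000000 * 32 = 0x100000000 -> truncated to 0, check passes
    64-bit multiply: (u64)0x08000000 * 32 = 4 GiB  -> sz > SIZE_MAX, request refused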
index c36321bc87148864db09bd0af4fc38a39cb182f9..d34e331554f3903eaded86cf12fdd4a4ef24507a 100644 (file)
@@ -42,7 +42,7 @@ void
 msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma, struct sg_table *sgt)
 {
-       if (!vma->iova)
+       if (!aspace || !vma->iova)
                return;
 
        if (aspace->mmu) {
index c7c84d34d97e20308b926077e7ed4ce6e8d77281..88582af8bd89745b7c78332cf415dd0bc9f24e23 100644 (file)
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
        /* Create output path objects for each VBIOS display path. */
        i = -1;
        while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+               if (ver < 0x40) /* No support for chipsets prior to NV50. */
+                       break;
                if (dcbE.type == DCB_OUTPUT_UNUSED)
                        continue;
                if (dcbE.type == DCB_OUTPUT_EOL)
index 5d450332c2fd79fd8c1052aca84d7d15c5db3ef8..2900f1410d959bc9f4002a6f0c38a3aae5a9c59a 100644 (file)
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
 static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
-       int ret;
+       int ret, i;
 
        ret = pm_runtime_get_sync(vop->dev);
        if (ret < 0) {
@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
        }
 
        memcpy(vop->regs, vop->regsbak, vop->len);
+       /*
+        * We need to make sure that all windows are disabled before we
+        * enable the crtc. Otherwise we might try to scan from a destroyed
+        * buffer later.
+        */
+       for (i = 0; i < vop->data->win_size; i++) {
+               struct vop_win *vop_win = &vop->win[i];
+               const struct vop_win_data *win = vop_win->data;
+
+               spin_lock(&vop->reg_lock);
+               VOP_WIN_SET(vop, win, enable, 0);
+               spin_unlock(&vop->reg_lock);
+       }
+
        vop_cfg_done(vop);
 
        /*
@@ -566,28 +580,11 @@ err_put_pm_runtime:
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
-       int i;
 
        WARN_ON(vop->event);
 
        rockchip_drm_psr_deactivate(&vop->crtc);
 
-       /*
-        * We need to make sure that all windows are disabled before we
-        * disable that crtc. Otherwise we might try to scan from a destroyed
-        * buffer later.
-        */
-       for (i = 0; i < vop->data->win_size; i++) {
-               struct vop_win *vop_win = &vop->win[i];
-               const struct vop_win_data *win = vop_win->data;
-
-               spin_lock(&vop->reg_lock);
-               VOP_WIN_SET(vop, win, enable, 0);
-               spin_unlock(&vop->reg_lock);
-       }
-
-       vop_cfg_done(vop);
-
        drm_crtc_vblank_off(crtc);
 
        /*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
         * Src.x1 can be odd after clipping, but a yuv plane's start point
         * needs to be aligned to 2 pixels.
         */
-       if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+       if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+               DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
                return -EINVAL;
+       }
 
        return 0;
 }
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        spin_lock(&vop->reg_lock);
 
        VOP_WIN_SET(vop, win, format, format);
-       VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+       VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
        VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
        if (is_yuv_support(fb->format->format)) {
                int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
                offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 
                dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
-               VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+               VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
                VOP_WIN_SET(vop, win, uv_mst, dma_addr);
        }
 
index 9979fd0c22821d7efa3d7054468e0914619e0692..27eefbfcf3d05f3ad03e3b9f6fe2ae56f68e7684 100644 (file)
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 
        act_height = (src_h + vskiplines - 1) / vskiplines;
 
+       if (act_height == dst_h)
+               return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
        return GET_SCL_FT_BILI_DN(act_height, dst_h);
 }
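With vertical skip active the hardware only fetches every `vskiplines`-th source line, so the scaler sees act_height = ceil(src_h / vskiplines) input lines. When the rounded act_height happens to equal dst_h, deriving the factor from it degenerates to 1:1 and silently drops the fractional part of the real ratio; the fix derives the factor from the full src_h and divides by vskiplines. A numeric illustration with hypothetical values src_h = 1439, dst_h = 720, vskiplines = 2:

    act_height = (1439 + 2 - 1) / 2 = 720       -> equals dst_h
    old: GET_SCL_FT_BILI_DN(720, 720)            -> exact 1:1 factor
    true remaining ratio: 1439 / (2 * 720) ~ 0.9993, not exactly 1:1
    new: GET_SCL_FT_BILI_DN(1439, 720) / 2       -> keeps the fraction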
 
index 2c4817fb08902427df09223b7ab4046241fc6c77..8fe5b184b4e8a945d68c201a3a29be531594e2d2 100644 (file)
@@ -7,7 +7,6 @@ config DRM_STM
        select DRM_PANEL
        select VIDEOMODE_HELPERS
        select FB_PROVIDE_GET_FB_UNMAPPED_AREA
-       default y
 
        help
          Enable support for the on-chip display controller on
index 6b5d3be283c4e7e00f72bb32e550b52ab06e1526..807299dd45ebf0663fbc97b3831bd8f92148ec20 100644 (file)
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
        struct regmap *regmap;
        int irq;
        struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
-       atomic_t active_intr;
        struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
        struct mutex mutex;
        u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
                goto out_fix_power_state;
        }
 
-       if (state)
-               atomic_inc(&data->active_intr);
-       else
-               atomic_dec(&data->active_intr);
-
        return 0;
 
 out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
        struct bmc150_accel_data *data = iio_priv(indio_dev);
 
        mutex_lock(&data->mutex);
-       if (atomic_read(&data->active_intr))
-               bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+       bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
        bmc150_accel_fifo_set_mode(data);
        mutex_unlock(&data->mutex);
 
index 07d1489cd457a6b5445b8b3ba35dad95b1792acc..e44f62bf9caa9f1a45c6699875b885d497bb0a7a 100644 (file)
@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_ihl = 0x02,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x23,
+                       .value = BIT(0),
+               },
                .multi_read_bit = true,
                .bootime = 2,
        },
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_od = 0x40,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x23,
+                       .value = BIT(0),
+               },
                .multi_read_bit = true,
                .bootime = 2,
        },
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                                .en_mask = 0x08,
                        },
                },
+               .sim = {
+                       .addr = 0x24,
+                       .value = BIT(0),
+               },
                .multi_read_bit = false,
                .bootime = 2,
        },
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_int1 = 0x04,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x21,
+                       .value = BIT(1),
+               },
                .multi_read_bit = true,
                .bootime = 2, /* guess */
        },
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_od = 0x40,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x21,
+                       .value = BIT(7),
+               },
                .multi_read_bit = false,
                .bootime = 2, /* guess */
        },
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .addr_ihl = 0x22,
                        .mask_ihl = 0x80,
                },
+               .sim = {
+                       .addr = 0x23,
+                       .value = BIT(0),
+               },
                .multi_read_bit = true,
                .bootime = 2,
        },
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_int1 = 0x04,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x21,
+                       .value = BIT(1),
+               },
                .multi_read_bit = false,
                .bootime = 2,
        },
@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .mask_ihl = 0x02,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
+               .sim = {
+                       .addr = 0x23,
+                       .value = BIT(0),
+               },
                .multi_read_bit = true,
                .bootime = 2,
        },
index e0ea411a0b2df9563085c70552086946843ba2ca..c02b23d675cbc1540ec47769515da714c784cf67 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/iio/iio.h>
 #include <linux/iio/driver.h>
+#include <linux/iopoll.h>
 
 #define ASPEED_RESOLUTION_BITS         10
 #define ASPEED_CLOCKS_PER_SAMPLE       12
 
 #define ASPEED_ENGINE_ENABLE           BIT(0)
 
+#define ASPEED_ADC_CTRL_INIT_RDY       BIT(8)
+
+#define ASPEED_ADC_INIT_POLLING_TIME   500
+#define ASPEED_ADC_INIT_TIMEOUT                500000
+
 struct aspeed_adc_model_data {
        const char *model_name;
        unsigned int min_sampling_rate; // Hz
        unsigned int max_sampling_rate; // Hz
        unsigned int vref_voltage;      // mV
+       bool wait_init_sequence;
 };
 
 struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
                goto scaler_error;
        }
 
+       model_data = of_device_get_match_data(&pdev->dev);
+
+       if (model_data->wait_init_sequence) {
+               /* Enable engine in normal mode. */
+               writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
+                      data->base + ASPEED_REG_ENGINE_CONTROL);
+
+               /* Wait for initial sequence complete. */
+               ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
+                                        adc_engine_control_reg_val,
+                                        adc_engine_control_reg_val &
+                                        ASPEED_ADC_CTRL_INIT_RDY,
+                                        ASPEED_ADC_INIT_POLLING_TIME,
+                                        ASPEED_ADC_INIT_TIMEOUT);
+               if (ret)
+                       goto scaler_error;
+       }
+
        /* Start all channels in normal mode. */
        ret = clk_prepare_enable(data->clk_scaler->clk);
        if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
        .vref_voltage = 1800, // mV
        .min_sampling_rate = 1,
        .max_sampling_rate = 1000000,
+       .wait_init_sequence = true,
 };
 
 static const struct of_device_id aspeed_adc_matches[] = {
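readl_poll_timeout() is the helper doing the waiting added above: it re-reads a register into `val` until the condition holds or the timeout expires. Stripped to its essentials, with the same polling parameters as the patch:

    u32 val;
    int ret;

    /* Re-read every 500 us; give up after 500 ms with -ETIMEDOUT. */
    ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL, val,
                             val & ASPEED_ADC_CTRL_INIT_RDY,
                             ASPEED_ADC_INIT_POLLING_TIME,
                             ASPEED_ADC_INIT_TIMEOUT);
    if (ret)
            return ret;     /* init sequence never signalled ready */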
index 64799ad7ebad02a797607470678aa500842ed99f..462a99c13e7a210a74f1d9b2a9c87de7ebc3fad6 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/iio/driver.h>
 
 #define AXP288_ADC_EN_MASK             0xF1
+#define AXP288_ADC_TS_PIN_GPADC                0xF2
+#define AXP288_ADC_TS_PIN_ON           0xF3
 
 enum axp288_adc_id {
        AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
        return IIO_VAL_INT;
 }
 
+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+                               unsigned long address)
+{
+       int ret;
+
+       /* channels other than GPADC do not need to switch TS pin */
+       if (address != AXP288_GP_ADC_H)
+               return 0;
+
+       ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+       if (ret)
+               return ret;
+
+       /* When switching to the GPADC pin give things some time to settle */
+       if (mode == AXP288_ADC_TS_PIN_GPADC)
+               usleep_range(6000, 10000);
+
+       return 0;
+}
+
 static int axp288_adc_read_raw(struct iio_dev *indio_dev,
                        struct iio_chan_spec const *chan,
                        int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
        mutex_lock(&indio_dev->mlock);
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
+               if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+                                       chan->address)) {
+                       dev_err(&indio_dev->dev, "GPADC mode\n");
+                       ret = -EINVAL;
+                       break;
+               }
                ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+               if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+                                               chan->address))
+                       dev_err(&indio_dev->dev, "TS pin restore\n");
                break;
        default:
                ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
        return ret;
 }
 
+static int axp288_adc_set_state(struct regmap *regmap)
+{
+       /* ADC should always be enabled for the internal FG to function */
+       if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+               return -EIO;
+
+       return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+}
+
 static const struct iio_info axp288_adc_iio_info = {
        .read_raw = &axp288_adc_read_raw,
        .driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
         * Set ADC to enabled state at all times, including system suspend;
         * otherwise internal fuel gauge functionality may be affected.
         */
-       ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+       ret = axp288_adc_set_state(axp20x->regmap);
        if (ret) {
                dev_err(&pdev->dev, "unable to enable ADC device\n");
                return ret;
index 81d4c39e414a4da6b0f8df0ebd8922d371a909d7..137f577d94326a299e0f944edfd108bdc2db31fe 100644 (file)
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
 
 err:
        pm_runtime_put_autosuspend(indio_dev->dev.parent);
+       disable_irq(irq);
        mutex_unlock(&info->mutex);
 
        return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
                complete(&info->completion);
 
 out:
-       disable_irq_nosync(info->temp_data_irq);
        return IRQ_HANDLED;
 }
 
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
                complete(&info->completion);
 
 out:
-       disable_irq_nosync(info->fifo_data_irq);
        return IRQ_HANDLED;
 }
 
index 01fc76f7d6602c7b14c5f0cd4ea0c01090278087..c168e0db329ab49b6b59d720cc62cb2b37352088 100644 (file)
@@ -77,7 +77,7 @@
 #define VF610_ADC_ADSTS_MASK           0x300
 #define VF610_ADC_ADLPC_EN             0x80
 #define VF610_ADC_ADHSC_EN             0x400
-#define VF610_ADC_REFSEL_VALT          0x100
+#define VF610_ADC_REFSEL_VALT          0x800
 #define VF610_ADC_REFSEL_VBG           0x1000
 #define VF610_ADC_ADTRG_HARD           0x2000
 #define VF610_ADC_AVGS_8               0x4000
index 79c8c7cd70d5c6d74fc2e32cad372f8644233c6b..6e6a1ecc99ddf4b69252b6b4232c9cda6f00d4be 100644 (file)
@@ -550,6 +550,31 @@ out:
 }
 EXPORT_SYMBOL(st_sensors_read_info_raw);
 
+static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
+                       const struct st_sensor_settings *sensor_settings)
+{
+       struct st_sensor_data *sdata = iio_priv(indio_dev);
+       struct device_node *np = sdata->dev->of_node;
+       struct st_sensors_platform_data *pdata;
+
+       pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
+       if (((np && of_property_read_bool(np, "spi-3wire")) ||
+            (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
+               int err;
+
+               err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
+                                           sensor_settings->sim.addr,
+                                           sensor_settings->sim.value);
+               if (err < 0) {
+                       dev_err(&indio_dev->dev,
+                               "failed to init interface mode\n");
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
 int st_sensors_check_device_support(struct iio_dev *indio_dev,
                        int num_sensors_list,
                        const struct st_sensor_settings *sensor_settings)
@@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
                return -ENODEV;
        }
 
+       err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
+       if (err < 0)
+               return err;
+
        if (sensor_settings[i].wai_addr) {
                err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
                                           sensor_settings[i].wai_addr, &wai);
index e7d4ea75e007c0bd82ef087812524258ec69f490..7599693f7fe9597cb750319ccb2635362c1dc9f0 100644 (file)
@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
        struct tsl2563_chip *chip = iio_priv(dev_info);
 
        iio_push_event(dev_info,
-                      IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+                      IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
                                            0,
                                            IIO_EV_TYPE_THRESH,
                                            IIO_EV_DIR_EITHER),
index aa61ec15c1396ca3925ecf1a099fbf91a302dae7..f1bce05ffa135703792f24317db81375d3e4ebb3 100644 (file)
@@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .mask_od = 0x40,
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
-               .multi_read_bit = true,
+               .multi_read_bit = false,
                .bootime = 2,
        },
 };
index a5dfab6adf495b88e846ee03b815536a7917bf48..221468f77128440acb2f5956d2ecf25bac7cc80d 100644 (file)
@@ -537,10 +537,11 @@ void ib_unregister_device(struct ib_device *device)
        }
        up_read(&lists_rwsem);
 
-       mutex_unlock(&device_mutex);
-
        ib_device_unregister_rdmacg(device);
        ib_device_unregister_sysfs(device);
+
+       mutex_unlock(&device_mutex);
+
        ib_cache_cleanup_one(device);
 
        ib_security_destroy_port_pkey_list(device);
index c023e2c81b8f2b06443452f91edcc506b46b6d17..5e530d2bee4448ddcb68724199a5c93d36eec32a 100644 (file)
@@ -1153,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                kref_get(&file->ref);
                mutex_unlock(&uverbs_dev->lists_mutex);
 
-               ib_uverbs_event_handler(&file->event_handler, &event);
 
                mutex_lock(&file->cleanup_mutex);
                ucontext = file->ucontext;
@@ -1170,6 +1169,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                         * for example due to freeing the resources
                         * (e.g mmput).
                         */
+                       ib_uverbs_event_handler(&file->event_handler, &event);
                        ib_dev->disassociate_ucontext(ucontext);
                        mutex_lock(&file->cleanup_mutex);
                        ib_uverbs_cleanup_ucontext(file, ucontext, true);
index 5332f06b99ba4cfa3ef404ec816d09fb68ec0ddf..c2fba76becd4e985a35b9ed3d1ec1314c955706b 100644 (file)
@@ -661,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
        rhp = php->rhp;
 
        if (mr_type != IB_MR_TYPE_MEM_REG ||
-           max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+           max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
                                         use_dsgl))
                return ERR_PTR(-EINVAL);
 
index f78a733a63ec7e1f884930a72e53ba2100ee888e..d545302b8ef8c520d4eceb630a39db06b7a584a4 100644 (file)
@@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
        } else {
                u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
 
-               if (!dmac)
+               if (!dmac) {
+                       kfree(ah);
                        return ERR_PTR(-EINVAL);
+               }
                memcpy(ah->av.mac, dmac, ETH_ALEN);
        }
 
index 9ec1ae9a82c9843878ea10266b0d5338d8862308..a49ff2eb6fb3bca54f40bc460238eeb4a5b4ba52 100644 (file)
@@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
        u64 base = 0;
        u32 i, j;
        u32 k = 0;
-       u32 low;
 
        /* copy base values in obj_info */
-       for (i = I40IW_HMC_IW_QP, j = 0;
-                       i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+       for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+               if ((i == I40IW_HMC_IW_SRQ) ||
+                       (i == I40IW_HMC_IW_FSIMC) ||
+                       (i == I40IW_HMC_IW_FSIAV)) {
+                       info[i].base = 0;
+                       info[i].cnt = 0;
+                       continue;
+               }
                get_64bit_val(buf, j, &temp);
                info[i].base = RS_64_1(temp, 32) * 512;
                if (info[i].base > base) {
                        base = info[i].base;
                        k = i;
                }
-               low = (u32)(temp);
-               if (low)
-                       info[i].cnt = low;
+               if (i == I40IW_HMC_IW_APBVT_ENTRY) {
+                       info[i].cnt = 1;
+                       continue;
+               }
+               if (i == I40IW_HMC_IW_QP)
+                       info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+               else if (i == I40IW_HMC_IW_CQ)
+                       info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+               else
+                       info[i].cnt = (u32)(temp);
        }
        size = info[k].cnt * info[k].size + info[k].base;
        if (size & 0x1FFFFF)
@@ -154,6 +166,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
        return 0;
 }
 
+/**
+ * i40iw_sc_decode_fpm_query() - Decode a 64-bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @obj_info: ptr to i40iw_hmc_obj_info struct
+ * @rsrc_idx: resource index into obj_info
+ *
+ * Decode a 64-bit value from the fpm query buffer into max count and size
+ */
+static u64 i40iw_sc_decode_fpm_query(u64 *buf,
+                                           u32 buf_idx,
+                                           struct i40iw_hmc_obj_info *obj_info,
+                                           u32 rsrc_idx)
+{
+       u64 temp;
+       u32 size;
+
+       get_64bit_val(buf, buf_idx, &temp);
+       obj_info[rsrc_idx].max_cnt = (u32)temp;
+       size = (u32)RS_64_1(temp, 32);
+       obj_info[rsrc_idx].size = LS_64_1(1, size);
+
+       return temp;
+}
+
 /**
  * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
  * @buf: ptr to fpm query buffer
@@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
                                struct i40iw_hmc_info *hmc_info,
                                struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
 {
-       u64 temp;
        struct i40iw_hmc_obj_info *obj_info;
-       u32 i, j, size;
+       u64 temp;
+       u32 size;
        u16 max_pe_sds;
 
        obj_info = hmc_info->hmc_obj;
@@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
        hmc_fpm_misc->max_sds = max_pe_sds;
        hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
 
-       for (i = I40IW_HMC_IW_QP, j = 8;
-            i <= I40IW_HMC_IW_ARP; i++, j += 8) {
-               get_64bit_val(buf, j, &temp);
-               if (i == I40IW_HMC_IW_QP)
-                       obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
-               else if (i == I40IW_HMC_IW_CQ)
-                       obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
-               else
-                       obj_info[i].max_cnt = (u32)temp;
+       get_64bit_val(buf, 8, &temp);
+       obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+       size = (u32)RS_64_1(temp, 32);
+       obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
 
-               size = (u32)RS_64_1(temp, 32);
-               obj_info[i].size = ((u64)1 << size);
-       }
-       for (i = I40IW_HMC_IW_MR, j = 48;
-                       i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
-               get_64bit_val(buf, j, &temp);
-               obj_info[i].max_cnt = (u32)temp;
-               size = (u32)RS_64_1(temp, 32);
-               obj_info[i].size = LS_64_1(1, size);
-       }
+       get_64bit_val(buf, 16, &temp);
+       obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+       size = (u32)RS_64_1(temp, 32);
+       obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
+
+       i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
+       i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
+
+       obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+       obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+       i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
+       i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
 
-       get_64bit_val(buf, 120, &temp);
-       hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
-       get_64bit_val(buf, 120, &temp);
-       hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
-       get_64bit_val(buf, 120, &temp);
-       hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
        get_64bit_val(buf, 64, &temp);
+       obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
+       obj_info[I40IW_HMC_IW_XFFL].size = 4;
        hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
        if (!hmc_fpm_misc->xf_block_size)
                return I40IW_ERR_INVALID_SIZE;
+
+       i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
+
        get_64bit_val(buf, 80, &temp);
+       obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
+       obj_info[I40IW_HMC_IW_Q1FL].size = 4;
        hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
        if (!hmc_fpm_misc->q1_block_size)
                return I40IW_ERR_INVALID_SIZE;
+
+       i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
+
+       get_64bit_val(buf, 112, &temp);
+       obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
+       obj_info[I40IW_HMC_IW_PBLE].size = 8;
+
+       get_64bit_val(buf, 120, &temp);
+       hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+       hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+       hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+
        return 0;
 }
 
@@ -3392,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_
                hmc_info->sd_table.sd_entry = virt_mem.va;
        }
 
-       /* fill size of objects which are fixed */
-       hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
-       hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
-       hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
-       hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
-       hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
-
        return ret_code;
 }
 
@@ -4840,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
 {
        u8 fcn_id = vsi->fcn_id;
 
-       if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+       if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
                vsi->dev->fcn_id_array[fcn_id] = false;
        i40iw_hw_stats_stop_timer(vsi);
 }
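Each 64-bit word in the FPM query buffer packs two fields: the low 32 bits carry the maximum object count and the high 32 bits carry log2 of the object size. Unrolled into plain shifts, equivalent to the RS_64_1/LS_64_1 macro calls in the decode helper above:

    u64 temp;

    get_64bit_val(buf, buf_idx, &temp);              /* read one packed word    */
    obj_info[rsrc_idx].max_cnt = (u32)temp;          /* low 32 bits: max count  */
    obj_info[rsrc_idx].size = 1ULL << (temp >> 32);  /* high 32 bits: log2(size) */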
index a39ac12b6a7e8430b80e42d9a38d7abd3b48d3b8..2ebaadbed379406e9d6c10f01fe0577ab515ebc9 100644 (file)
@@ -1507,8 +1507,8 @@ enum {
        I40IW_CQ0_ALIGNMENT_MASK =              (256 - 1),
        I40IW_HOST_CTX_ALIGNMENT_MASK =         (4 - 1),
        I40IW_SHADOWAREA_MASK =                 (128 - 1),
-       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK =    0,
-       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK =   0
+       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK =    (4 - 1),
+       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK =   (4 - 1)
 };
 
 enum i40iw_alignment {
index 71050c5d29a05f3f6cb9433aafd7e09fbb5695aa..7f5583d83622a57821d1d4718c6bbc49d84410d0 100644 (file)
@@ -685,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
        cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
        tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
        ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
-                                    I40IW_CQ0_ALIGNMENT_MASK);
+                                    I40IW_CQ0_ALIGNMENT);
        if (ret)
                return ret;
 
index 91c421762f06797be844f0834d7ff09ff36bb5f9..f7013f11d8085966bf6d9dab310f0f8d0f35422a 100644 (file)
@@ -62,7 +62,7 @@ enum i40iw_status_code {
        I40IW_ERR_INVALID_ALIGNMENT = -23,
        I40IW_ERR_FLUSHED_QUEUE = -24,
        I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
-       I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+       I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
        I40IW_ERR_TIMEOUT = -27,
        I40IW_ERR_OPCODE_MISMATCH = -28,
        I40IW_ERR_CQP_COMPL_ERROR = -29,
index b0d3a0e8a9b522a3549d63593a0af38f682e71ba..1060725d18bce852a108d6ce9e0b55cb460bfb0d 100644 (file)
@@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
 
        op_info = &info->op.inline_rdma_write;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-               return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+               return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
 
        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
@@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
 
        op_info = &info->op.inline_send;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-               return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+               return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
 
        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
@@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);
 
-       info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+       info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);
 
        info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
 
@@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
                                                         u8 *wqe_size)
 {
        if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
-               return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+               return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
 
        if (data_size <= 16)
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
index 69bda611d31385044766915410ac72eb70cffac5..90aa326fd7c0974e0c5a70eb5d2b46f50d03c490 100644 (file)
@@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
        struct pvrdma_dev *dev = to_vdev(ibcq->device);
        struct pvrdma_cq *cq = to_vcq(ibcq);
        u32 val = cq->cq_handle;
+       unsigned long flags;
+       int has_data = 0;
 
        val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
 
+       spin_lock_irqsave(&cq->cq_lock, flags);
+
        pvrdma_write_uar_cq(dev, val);
 
-       return 0;
+       if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+               unsigned int head;
+
+               has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+                                                   cq->ibcq.cqe, &head);
+               if (unlikely(has_data == PVRDMA_INVALID_IDX))
+                       dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+       }
+
+       spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+       return has_data;
 }
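
The change above makes pvrdma honor the ib_req_notify_cq() contract for IB_CQ_REPORT_MISSED_EVENTS: a positive return tells the caller that completions may already be queued. A user-space sketch of the caller-side poll/re-arm loop this enables follows; poll_one() and req_notify() are hypothetical stand-ins for the verbs calls, not the real API.

#include <stdbool.h>

struct cq { int pending; };
enum { REPORT_MISSED_EVENTS = 1 };

/* Hypothetical stand-ins for ib_poll_cq()/ib_req_notify_cq(). */
static bool poll_one(struct cq *cq)
{
        if (!cq->pending)
                return false;
        cq->pending--;
        return true;
}

static int req_notify(struct cq *cq, int flags)
{
        /* Mirror the fixed driver: report > 0 if work is still queued. */
        return (flags & REPORT_MISSED_EVENTS) && cq->pending ? 1 : 0;
}

static void drain_cq(struct cq *cq)
{
        for (;;) {
                while (poll_one(cq))
                        ;
                /* A positive return means completions may have arrived
                 * between the last poll and the re-arm: poll again
                 * rather than going to sleep. */
                if (req_notify(cq, REPORT_MISSED_EVENTS) <= 0)
                        break;
        }
}

int main(void)
{
        struct cq cq = { .pending = 3 };

        drain_cq(&cq);
        return cq.pending;      /* 0 once fully drained */
}
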
 
 /**
index 3b616cb7c67f88512e82dbc8ba5f61e565cbd2a5..714cf7f9b13859988ca7df9f1fc98a10e0e6fbae 100644 (file)
@@ -1248,6 +1248,10 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0100", 0 },
        { "ELAN0600", 0 },
        { "ELAN0605", 0 },
+       { "ELAN0608", 0 },
+       { "ELAN0605", 0 },
+       { "ELAN0609", 0 },
+       { "ELAN060B", 0 },
        { "ELAN1000", 0 },
        { }
 };
index 922ea02edcc3ef6c091b572275245e7d5fabd3f0..20b5b21c1bba8892f37aab4a0190dfd6352b72d1 100644 (file)
@@ -380,8 +380,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
                return 0;
 
        if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
-               psmouse_warn(psmouse, "failed to get extended button data\n");
-               button_info = 0;
+               psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+               button_info = 0x33;
        }
 
        psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
index b97188acc4f1006185a5f8cb4fb3ee0e5cf735cf..2d80fa8a0634aba34b366609d8bcc50f432bb31c 100644 (file)
@@ -1519,6 +1519,13 @@ static int arm_smmu_add_device(struct device *dev)
 
        if (using_legacy_binding) {
                ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+               /*
+                * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
+                * will allocate/initialise a new one. Thus we need to update fwspec for
+                * later use.
+                */
+               fwspec = dev->iommu_fwspec;
                if (ret)
                        goto out_free;
        } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
index 28b26c80f4cf937b8547328b5d724a69e51b7d05..072bd227b6c677b40fee80f99e1364d733e681a3 100644 (file)
@@ -137,14 +137,14 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
 #define AT91_RTC_IMR           0x28
 #define AT91_RTC_IRQ_MASK      0x1f
 
-void __init aic_common_rtc_irq_fixup(struct device_node *root)
+void __init aic_common_rtc_irq_fixup(void)
 {
        struct device_node *np;
        void __iomem *regs;
 
-       np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
+       np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
        if (!np)
-               np = of_find_compatible_node(root, NULL,
+               np = of_find_compatible_node(NULL, NULL,
                                             "atmel,at91sam9x5-rtc");
 
        if (!np)
@@ -165,7 +165,7 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
 #define AT91_RTT_ALMIEN                (1 << 16)               /* Alarm Interrupt Enable */
 #define AT91_RTT_RTTINCIEN     (1 << 17)               /* Real Time Timer Increment Interrupt Enable */
 
-void __init aic_common_rtt_irq_fixup(struct device_node *root)
+void __init aic_common_rtt_irq_fixup(void)
 {
        struct device_node *np;
        void __iomem *regs;
@@ -196,11 +196,10 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches)
                return;
 
        match = of_match_node(matches, root);
-       of_node_put(root);
 
        if (match) {
-               void (*fixup)(struct device_node *) = match->data;
-               fixup(root);
+               void (*fixup)(void) = match->data;
+               fixup();
        }
 
        of_node_put(root);
index af60376d50debe30132acd00c58254c4d1a7ab9a..242e62c1851ead9744442e0236fa84198f745d7f 100644 (file)
@@ -33,8 +33,8 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
                                             const char *name, int nirqs,
                                             const struct of_device_id *matches);
 
-void __init aic_common_rtc_irq_fixup(struct device_node *root);
+void __init aic_common_rtc_irq_fixup(void);
 
-void __init aic_common_rtt_irq_fixup(struct device_node *root);
+void __init aic_common_rtt_irq_fixup(void);
 
 #endif /* __IRQ_ATMEL_AIC_COMMON_H */
index 37f952dd9fc94bdc5faa6c2721822b8780dd46e1..bb1ad451392fd8b7a315a6b2c5bea523af04a62a 100644 (file)
@@ -209,20 +209,20 @@ static const struct irq_domain_ops aic_irq_ops = {
        .xlate  = aic_irq_domain_xlate,
 };
 
-static void __init at91rm9200_aic_irq_fixup(struct device_node *root)
+static void __init at91rm9200_aic_irq_fixup(void)
 {
-       aic_common_rtc_irq_fixup(root);
+       aic_common_rtc_irq_fixup();
 }
 
-static void __init at91sam9260_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9260_aic_irq_fixup(void)
 {
-       aic_common_rtt_irq_fixup(root);
+       aic_common_rtt_irq_fixup();
 }
 
-static void __init at91sam9g45_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9g45_aic_irq_fixup(void)
 {
-       aic_common_rtc_irq_fixup(root);
-       aic_common_rtt_irq_fixup(root);
+       aic_common_rtc_irq_fixup();
+       aic_common_rtt_irq_fixup();
 }
 
 static const struct of_device_id aic_irq_fixups[] __initconst = {
index c04ee9a23d094f9d6a9e0c95a7451cf73626727a..6acad2ea0fb3565a2604ecbb3fb5235395d62fad 100644 (file)
@@ -305,9 +305,9 @@ static const struct irq_domain_ops aic5_irq_ops = {
        .xlate  = aic5_irq_domain_xlate,
 };
 
-static void __init sama5d3_aic_irq_fixup(struct device_node *root)
+static void __init sama5d3_aic_irq_fixup(void)
 {
-       aic_common_rtc_irq_fixup(root);
+       aic_common_rtc_irq_fixup();
 }
 
 static const struct of_device_id aic5_irq_fixups[] __initconst = {
index bddf169c4b37b7c9d2a91518aa233480dbb52f5d..b009b916a2923504414aafe6961b404614770da5 100644 (file)
@@ -189,6 +189,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
 
        ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
        ct->chip.irq_resume = brcmstb_l2_intc_resume;
+       ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
 
        if (data->can_wake) {
                /* This IRQ chip can wake the system, set all child interrupts
index 249240d9a4259eb72b7afb68c2e2723a72e7c9c5..833a90fe33aed839a81b781831027e677eef5581 100644 (file)
@@ -43,6 +43,7 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
                        *dev_id = args.args[0];
                        break;
                }
+               index++;
        } while (!ret);
 
        return ret;
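
The one-line `index++` above is the whole fix: the do/while walks successive msi-parent entries, and without the increment a non-matching first entry is re-parsed forever. A tiny self-contained sketch of the loop shape follows; parse_entry() is a made-up stand-in for of_parse_phandle_with_args(), used only to show the bug.

#include <stdio.h>

static int parse_entry(const int *tab, int len, int index, int *out)
{
        if (index >= len)
                return -1;              /* no more entries */
        *out = tab[index];
        return 0;
}

int main(void)
{
        const int entries[] = { 11, 22, 33 };
        int index = 0, ret, val;

        do {
                ret = parse_entry(entries, 3, index, &val);
                if (!ret && val == 22) {
                        printf("matched at %d\n", index);
                        break;
                }
                index++;        /* the previously missing increment */
        } while (!ret);
        return 0;
}
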
index 68932873eebc0c3d14caa00b048d0c961e765a98..284738add89b3b94f77ad257fd314bc89a423cae 100644 (file)
@@ -1835,7 +1835,7 @@ static int __init its_of_probe(struct device_node *node)
 
 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
 
-#if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531)
+#ifdef CONFIG_ACPI_NUMA
 struct its_srat_map {
        /* numa node id */
        u32     numa_node;
@@ -1843,7 +1843,7 @@ struct its_srat_map {
        u32     its_id;
 };
 
-static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata;
+static struct its_srat_map *its_srat_maps __initdata;
 static int its_in_srat __initdata;
 
 static int __init acpi_get_its_numa_node(u32 its_id)
@@ -1857,6 +1857,12 @@ static int __init acpi_get_its_numa_node(u32 its_id)
        return NUMA_NO_NODE;
 }
 
+static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
+                                         const unsigned long end)
+{
+       return 0;
+}
+
 static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
                         const unsigned long end)
 {
@@ -1873,12 +1879,6 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
                return -EINVAL;
        }
 
-       if (its_in_srat >= MAX_NUMNODES) {
-               pr_err("SRAT: ITS affinity exceeding max count[%d]\n",
-                               MAX_NUMNODES);
-               return -EINVAL;
-       }
-
        node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
 
        if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
@@ -1897,14 +1897,37 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
 
 static void __init acpi_table_parse_srat_its(void)
 {
+       int count;
+
+       count = acpi_table_parse_entries(ACPI_SIG_SRAT,
+                       sizeof(struct acpi_table_srat),
+                       ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+                       gic_acpi_match_srat_its, 0);
+       if (count <= 0)
+               return;
+
+       its_srat_maps = kmalloc(count * sizeof(struct its_srat_map),
+                               GFP_KERNEL);
+       if (!its_srat_maps) {
+               pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
+               return;
+       }
+
        acpi_table_parse_entries(ACPI_SIG_SRAT,
                        sizeof(struct acpi_table_srat),
                        ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
                        gic_acpi_parse_srat_its, 0);
 }
+
+/* free the its_srat_maps after ITS probing */
+static void __init acpi_its_srat_maps_free(void)
+{
+       kfree(its_srat_maps);
+}
 #else
 static void __init acpi_table_parse_srat_its(void)     { }
 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
+static void __init acpi_its_srat_maps_free(void) { }
 #endif
 
 static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
@@ -1951,6 +1974,7 @@ static void __init its_acpi_probe(void)
        acpi_table_parse_srat_its();
        acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
                              gic_acpi_parse_madt_its, 0);
+       acpi_its_srat_maps_free();
 }
 #else
 static void __init its_acpi_probe(void) { }
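
The its_srat_maps change above replaces a fixed MAX_NUMNODES array with a two-pass parse: a no-op matcher first counts the SRAT ITS-affinity entries, the table is then sized exactly, and the real parser fills it. Below is a user-space sketch of the count-then-allocate pattern; a plain callback walk stands in for acpi_table_parse_entries(), and all names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct srat_map { unsigned int node, its_id; };

static int walk(int nentries, int (*cb)(int idx, void *arg), void *arg)
{
        int i, matched = 0;

        for (i = 0; i < nentries; i++)
                if (!cb(i, arg))
                        matched++;
        return matched;
}

static int match_only(int idx, void *arg) { (void)idx; (void)arg; return 0; }

static int parse(int idx, void *arg)
{
        struct srat_map *maps = arg;

        maps[idx].node = idx;           /* stand-in for acpi_map_pxm_to_node() */
        maps[idx].its_id = 100 + idx;
        return 0;
}

int main(void)
{
        int count = walk(4, match_only, NULL);  /* pass 1: count only */
        struct srat_map *maps;

        if (count <= 0)
                return 0;
        maps = malloc(count * sizeof(*maps));   /* pass 2: size exactly */
        if (!maps)
                return 1;
        walk(count, parse, maps);
        printf("ITS %u on node %u\n", maps[0].its_id, maps[0].node);
        free(maps);                             /* mirrors acpi_its_srat_maps_free() */
        return 0;
}
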
index dbffb7ab62033b346ca7fc3b3468d6bcb3a19a63..984c3ecfd22c21aff59b3cad177c676348d86dd4 100644 (file)
@@ -353,6 +353,8 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
 
                        if (static_key_true(&supports_deactivate))
                                gic_write_eoir(irqnr);
+                       else
+                               isb();
 
                        err = handle_domain_irq(gic_data.domain, irqnr, regs);
                        if (err) {
@@ -640,11 +642,16 @@ static void gic_smp_init(void)
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
-       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       unsigned int cpu;
        void __iomem *reg;
        int enabled;
        u64 val;
 
+       if (force)
+               cpu = cpumask_first(mask_val);
+       else
+               cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
        if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
@@ -831,8 +838,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
        if (ret)
                return ret;
 
-       for (i = 0; i < nr_irqs; i++)
-               gic_irq_domain_map(domain, virq + i, hwirq + i);
+       for (i = 0; i < nr_irqs; i++) {
+               ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
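
One detail worth spelling out from the gic_set_affinity() hunk above: a forced affinity change must take the caller's mask verbatim (cpumask_first), while a normal change intersects it with the online mask first, e.g. so a hotplug path can target a CPU that is not yet online. A minimal sketch with plain bitmasks; pick_cpu(), first_bit() and NR_CPUS here are illustrative, not the kernel's cpumask API.

#include <stdio.h>

#define NR_CPUS 8

static int first_bit(unsigned int mask)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                if (mask & (1u << i))
                        return i;
        return NR_CPUS;                 /* "nr_cpu_ids": nothing set */
}

static int pick_cpu(unsigned int requested, unsigned int online, int force)
{
        /* force: trust the caller's mask even if the CPU is not (yet)
         * online; otherwise require an online CPU. */
        unsigned int eligible = force ? requested : (requested & online);

        return first_bit(eligible);
}

int main(void)
{
        unsigned int online = 0x0f;                     /* CPUs 0-3 online */

        printf("%d\n", pick_cpu(0x10, online, 1));      /* 4: forced */
        printf("%d\n", pick_cpu(0x10, online, 0));      /* 8: -EINVAL case */
        return 0;
}
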
index 1b1df4f770bdefe0a16e0109624801f8af90f1f7..d3e7c43718b82b9e7fb2191dedadab1950c3ac7d 100644 (file)
@@ -361,6 +361,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
                if (likely(irqnr > 15 && irqnr < 1020)) {
                        if (static_key_true(&supports_deactivate))
                                writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+                       isb();
                        handle_domain_irq(gic->domain, irqnr, regs);
                        continue;
                }
@@ -401,10 +402,12 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
                goto out;
 
        cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
-       if (unlikely(gic_irq < 32 || gic_irq > 1020))
+       if (unlikely(gic_irq < 32 || gic_irq > 1020)) {
                handle_bad_irq(desc);
-       else
+       } else {
+               isb();
                generic_handle_irq(cascade_irq);
+       }
 
  out:
        chained_irq_exit(chip, desc);
@@ -1027,8 +1030,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
        if (ret)
                return ret;
 
-       for (i = 0; i < nr_irqs; i++)
-               gic_irq_domain_map(domain, virq + i, hwirq + i);
+       for (i = 0; i < nr_irqs; i++) {
+               ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
index 78fc5d5e90514353b258658da350a85c408b89db..92e6570b11435e7e4dade7aa433231008c82b294 100644 (file)
@@ -26,7 +26,7 @@
 
 #define FSM_TIMER_DEBUG 0
 
-void
+int
 mISDN_FsmNew(struct Fsm *fsm,
             struct FsmNode *fnlist, int fncount)
 {
@@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm,
 
        fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
                                  fsm->event_count, GFP_KERNEL);
+       if (fsm->jumpmatrix == NULL)
+               return -ENOMEM;
 
        for (i = 0; i < fncount; i++)
                if ((fnlist[i].state >= fsm->state_count) ||
@@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm,
                } else
                        fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
                                        fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
+       return 0;
 }
 EXPORT_SYMBOL(mISDN_FsmNew);
 
index 928f5be192c1fd4a5c8197160efb9dad21e1141d..e1def84902212e1637cf96f0c9cefb8c3950edbd 100644 (file)
@@ -55,7 +55,7 @@ struct FsmTimer {
        void *arg;
 };
 
-extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
+extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
 extern void mISDN_FsmFree(struct Fsm *);
 extern int mISDN_FsmEvent(struct FsmInst *, int , void *);
 extern void mISDN_FsmChangeState(struct FsmInst *, int);
index bebc57b72138e721aa5a290482ae46e748aac2d2..3192b0eb39445435d7b38c0f914bded7c93b2fac 100644 (file)
@@ -414,8 +414,7 @@ l1_init(u_int *deb)
        l1fsm_s.event_count = L1_EVENT_COUNT;
        l1fsm_s.strEvent = strL1Event;
        l1fsm_s.strState = strL1SState;
-       mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
-       return 0;
+       return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
 }
 
 void
index 7243a6746f8b099d79d3221def6dcfcf138a6f51..9ff0903a0e89fb2f4f7373e1ef4d1d1663934b36 100644 (file)
@@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = {
 int
 Isdnl2_Init(u_int *deb)
 {
+       int res;
        debug = deb;
        mISDN_register_Bprotocol(&X75SLP);
        l2fsm.state_count = L2_STATE_COUNT;
        l2fsm.event_count = L2_EVENT_COUNT;
        l2fsm.strEvent = strL2Event;
        l2fsm.strState = strL2State;
-       mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
-       TEIInit(deb);
+       res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
+       if (res)
+               goto error;
+       res = TEIInit(deb);
+       if (res)
+               goto error_fsm;
        return 0;
+
+error_fsm:
+       mISDN_FsmFree(&l2fsm);
+error:
+       mISDN_unregister_Bprotocol(&X75SLP);
+       return res;
 }
 
 void
index 908127efccf8ceb94ef2ab1ed7df4b17b26ce5b9..12d9e5f4beb1f81c5aa5e5af81bc9aca61c21668 100644 (file)
@@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev)
 
 int TEIInit(u_int *deb)
 {
+       int res;
        debug = deb;
        teifsmu.state_count = TEI_STATE_COUNT;
        teifsmu.event_count = TEI_EVENT_COUNT;
        teifsmu.strEvent = strTeiEvent;
        teifsmu.strState = strTeiState;
-       mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+       res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+       if (res)
+               goto error;
        teifsmn.state_count = TEI_STATE_COUNT;
        teifsmn.event_count = TEI_EVENT_COUNT;
        teifsmn.strEvent = strTeiEvent;
        teifsmn.strState = strTeiState;
-       mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+       res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+       if (res)
+               goto error_smn;
        deactfsm.state_count =  DEACT_STATE_COUNT;
        deactfsm.event_count = DEACT_EVENT_COUNT;
        deactfsm.strEvent = strDeactEvent;
        deactfsm.strState = strDeactState;
-       mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+       res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+       if (res)
+               goto error_deact;
        return 0;
+
+error_deact:
+       mISDN_FsmFree(&teifsmn);
+error_smn:
+       mISDN_FsmFree(&teifsmu);
+error:
+       return res;
 }
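
Now that mISDN_FsmNew() can fail with -ENOMEM, TEIInit() and Isdnl2_Init() above unwind with the usual goto ladder: each successfully constructed FSM gains a matching free on the error path, in reverse order of construction. A compact user-space sketch of the same ladder; fsm_new()/fsm_free() are simplified stand-ins for the mISDN helpers.

#include <errno.h>
#include <stdlib.h>

struct fsm { void *jumpmatrix; };

static int fsm_new(struct fsm *f, size_t n)
{
        f->jumpmatrix = calloc(n, sizeof(void *));
        return f->jumpmatrix ? 0 : -ENOMEM;
}

static void fsm_free(struct fsm *f)
{
        free(f->jumpmatrix);
}

static int tei_init(struct fsm *user, struct fsm *net, struct fsm *deact)
{
        int res;

        res = fsm_new(user, 16);
        if (res)
                goto error;
        res = fsm_new(net, 16);
        if (res)
                goto error_user;
        res = fsm_new(deact, 16);
        if (res)
                goto error_net;
        return 0;

error_net:
        fsm_free(net);
error_user:
        fsm_free(user);
error:
        return res;
}

int main(void)
{
        struct fsm u, n, d;
        int res = tei_init(&u, &n, &d);

        if (!res) {
                fsm_free(&d);
                fsm_free(&n);
                fsm_free(&u);
        }
        return res ? 1 : 0;
}
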
 
 void TEIFree(void)
index c99634612fc408fbc97df9c7c8ef8e028efe2fad..b01e458d31e94ce9eba98b9300366121de550619 100644 (file)
@@ -7996,7 +7996,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
        if (mddev->safemode == 1)
                mddev->safemode = 0;
        /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
-       if (mddev->in_sync || !mddev->sync_checkers) {
+       if (mddev->in_sync || mddev->sync_checkers) {
                spin_lock(&mddev->lock);
                if (mddev->in_sync) {
                        mddev->in_sync = 0;
@@ -8656,6 +8656,9 @@ void md_check_recovery(struct mddev *mddev)
        if (mddev_trylock(mddev)) {
                int spares = 0;
 
+               if (!mddev->external && mddev->safemode == 1)
+                       mddev->safemode = 0;
+
                if (mddev->ro) {
                        struct md_rdev *rdev;
                        if (!mddev->external && mddev->in_sync)
index bfa1e907c472e49855f9e0abb4c307cd45514d4d..2dcbafa8e66ca7f5418fce8434fe9674977f0832 100644 (file)
@@ -236,9 +236,10 @@ struct r5l_io_unit {
        bool need_split_bio;
        struct bio *split_bio;
 
-       unsigned int has_flush:1;      /* include flush request */
-       unsigned int has_fua:1;        /* include fua request */
-       unsigned int has_null_flush:1; /* include empty flush request */
+       unsigned int has_flush:1;               /* include flush request */
+       unsigned int has_fua:1;                 /* include fua request */
+       unsigned int has_null_flush:1;          /* include null flush request */
+       unsigned int has_flush_payload:1;       /* include flush payload  */
        /*
         * io isn't sent yet, flush/fua request can only be submitted till it's
         * the first IO in running_ios list
@@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio)
        struct r5l_io_unit *io_deferred;
        struct r5l_log *log = io->log;
        unsigned long flags;
+       bool has_null_flush;
+       bool has_flush_payload;
 
        if (bio->bi_status)
                md_error(log->rdev->mddev, log->rdev);
@@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio)
 
        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+
+       /*
+        * The io may be freed as soon as io_list_lock is released, so
+        * its null_flush and flush_payload flags are not safe to access
+        * after that point.  Therefore, snapshot them here while the
+        * lock is still held.
+        */
+       has_null_flush = io->has_null_flush;
+       has_flush_payload = io->has_flush_payload;
+
        if (log->need_cache_flush && !list_empty(&io->stripe_list))
                r5l_move_to_end_ios(log);
        else
@@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio)
        if (log->need_cache_flush)
                md_wakeup_thread(log->rdev->mddev->thread);
 
-       if (io->has_null_flush) {
+       /* finish flush-only io_units and PAYLOAD_FLUSH-only io_units */
+       if (has_null_flush) {
                struct bio *bi;
 
                WARN_ON(bio_list_empty(&io->flush_barriers));
                while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
                        bio_endio(bi);
-                       atomic_dec(&io->pending_stripe);
+                       if (atomic_dec_and_test(&io->pending_stripe)) {
+                               __r5l_stripe_write_finished(io);
+                               return;
+                       }
                }
        }
-
-       /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
-       if (atomic_read(&io->pending_stripe) == 0)
-               __r5l_stripe_write_finished(io);
+       /* decrease pending_stripe for flush payload */
+       if (has_flush_payload)
+               if (atomic_dec_and_test(&io->pending_stripe))
+                       __r5l_stripe_write_finished(io);
 }
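
The key move in the r5l_log_endio() hunk above is snapshotting io->has_null_flush and io->has_flush_payload while io_list_lock is held, because the io unit may be freed the moment the lock drops. A user-space sketch of the snapshot-under-lock rule, with a pthread mutex standing in for the spinlock and all names illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct io_unit { bool has_null_flush; bool has_flush_payload; };

static pthread_mutex_t io_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void log_endio(struct io_unit *io)
{
        bool has_null_flush, has_flush_payload;

        pthread_mutex_lock(&io_list_lock);
        has_null_flush = io->has_null_flush;    /* safe: lock held */
        has_flush_payload = io->has_flush_payload;
        pthread_mutex_unlock(&io_list_lock);

        /* From here on, 'io' may already be freed by another context;
         * only the local copies are used. */
        if (has_null_flush)
                printf("complete barrier bios\n");
        if (has_flush_payload)
                printf("drop flush-payload reference\n");
}

int main(void)
{
        struct io_unit *io = calloc(1, sizeof(*io));

        if (!io)
                return 1;
        io->has_null_flush = true;
        log_endio(io);
        free(io);
        return 0;
}
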
 
 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
@@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
        payload->size = cpu_to_le32(sizeof(__le64));
        payload->flush_stripes[0] = cpu_to_le64(sect);
        io->meta_offset += meta_size;
+       /* multiple flush payloads count as one pending_stripe */
+       if (!io->has_flush_payload) {
+               io->has_flush_payload = 1;
+               atomic_inc(&io->pending_stripe);
+       }
        mutex_unlock(&log->io_mutex);
 }
 
@@ -2540,23 +2562,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
  */
 int r5c_journal_mode_set(struct mddev *mddev, int mode)
 {
-       struct r5conf *conf = mddev->private;
-       struct r5l_log *log = conf->log;
-
-       if (!log)
-               return -ENODEV;
+       struct r5conf *conf;
+       int err;
 
        if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
            mode > R5C_JOURNAL_MODE_WRITE_BACK)
                return -EINVAL;
 
+       err = mddev_lock(mddev);
+       if (err)
+               return err;
+       conf = mddev->private;
+       if (!conf || !conf->log) {
+               mddev_unlock(mddev);
+               return -ENODEV;
+       }
+
        if (raid5_calc_degraded(conf) > 0 &&
-           mode == R5C_JOURNAL_MODE_WRITE_BACK)
+           mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+               mddev_unlock(mddev);
                return -EINVAL;
+       }
 
        mddev_suspend(mddev);
        conf->log->r5c_journal_mode = mode;
        mddev_resume(mddev);
+       mddev_unlock(mddev);
 
        pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
                 mdname(mddev), mode, r5c_journal_mode_str[mode]);
index 8621a198a2ce3eae317e4007cd6c533b56cfecc4..bac33311f55a6d7a6699b362c39730fae9c4f107 100644 (file)
@@ -215,6 +215,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
+       /*
+        * MEI hardware must be resumed from runtime suspend before
+        * system suspend so that it can perform the link reset flow.
+        */
+       pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
        /*
        * For not wake-able HW runtime pm framework
        * can't be used on pci device level.
index f811cd52446852beecfedf02b7100f4bb6789169..e38a5f144373451fc87007ffc1cf4292059c5408 100644 (file)
@@ -137,6 +137,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
+       /*
+        * MEI hardware must be resumed from runtime suspend before
+        * system suspend so that it can perform the link reset flow.
+        */
+       pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
        /*
        * For not wake-able HW runtime pm framework
        * can't be used on pci device level.
index e5938c791330c9be1203c2698bc511e4f98aaea4..f1bbfd389367ff4530137be199c4063c65f97f5c 100644 (file)
@@ -2170,7 +2170,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
                 * from being accepted.
                 */
                card = md->queue.card;
+               spin_lock_irq(md->queue.queue->queue_lock);
                queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
+               spin_unlock_irq(md->queue.queue->queue_lock);
                blk_set_queue_dying(md->queue.queue);
                mmc_cleanup_queue(&md->queue);
                if (md->disk->flags & GENHD_FL_UP) {
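
The mmc hunk above exists because block queue flags are plain, non-atomic bit operations: queue_flag_set() must be called with the queue's spinlock held. A stripped-down sketch of the rule, with a pthread spinlock standing in for queue_lock and a simplified flags word in place of the real request queue.

#include <pthread.h>
#include <stdio.h>

#define QUEUE_FLAG_BYPASS (1u << 0)

struct queue {
        pthread_spinlock_t lock;
        unsigned int flags;
};

static void queue_flag_set(unsigned int flag, struct queue *q)
{
        q->flags |= flag;       /* non-atomic: caller must hold q->lock */
}

int main(void)
{
        struct queue q = { .flags = 0 };

        pthread_spin_init(&q.lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_lock(&q.lock);
        queue_flag_set(QUEUE_FLAG_BYPASS, &q);
        pthread_spin_unlock(&q.lock);
        printf("flags=%#x\n", q.flags);
        return 0;
}
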
index 4ffea14b7eb645d92a91d62907d64c97cf8a9998..2bae69e39544452dc323a9a3bd15ae9b3e7de1b9 100644 (file)
@@ -1289,7 +1289,7 @@ out_err:
 static int mmc_select_hs400es(struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
-       int err = 0;
+       int err = -EINVAL;
        u8 val;
 
        if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
index 04ff3c97a535143933acec93caa8483a43f633c6..2ab4788d021f0512082c6bd67d1edaa57ba61379 100644 (file)
@@ -2086,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
        mmc->max_seg_size = mmc->max_req_size;
 
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
-                    MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
+                    MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
 
        mmc->caps |= mmc_pdata(host)->caps;
        if (mmc->caps & MMC_CAP_8_BIT_DATA)
index f336a9b855765e5ea236fddf9564f5f084793597..9ec8f033ac5f077b05fdfc32a7f97f1747339ca8 100644 (file)
@@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return BLK_STS_IOERR;
+               return BLK_STS_OK;
        default:
                return BLK_STS_IOERR;
        }
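
The added `return BLK_STS_OK;` above fixes a switch fallthrough: without it, a successful write falls into the default arm and is reported as an I/O error. A sketch of the bug shape in plain C; the names are illustrative, not the block layer's.

#include <stdio.h>

enum status { STS_OK, STS_IOERR };

static enum status do_request(int is_write)
{
        switch (is_write) {
        case 1:
                /* ... write sectors ... */
                return STS_OK;  /* the previously missing statement */
        default:
                return STS_IOERR;
        }
}

int main(void)
{
        printf("write -> %s\n", do_request(1) == STS_OK ? "OK" : "IOERR");
        return 0;
}
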
index 9bee6c1c70cca33941ae8db8002d69e760693bac..fc63992ab0e0ad3300aa9a8d991fc107788ae780 100644 (file)
@@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        new_slave->delay = 0;
        new_slave->link_failure_count = 0;
 
-       if (bond_update_speed_duplex(new_slave))
+       if (bond_update_speed_duplex(new_slave) &&
+           bond_needs_speed_duplex(bond))
                new_slave->link = BOND_LINK_DOWN;
 
        new_slave->last_rx = jiffies -
@@ -2140,11 +2141,13 @@ static void bond_miimon_commit(struct bonding *bond)
                        continue;
 
                case BOND_LINK_UP:
-                       if (bond_update_speed_duplex(slave)) {
+                       if (bond_update_speed_duplex(slave) &&
+                           bond_needs_speed_duplex(bond)) {
                                slave->link = BOND_LINK_DOWN;
-                               netdev_warn(bond->dev,
-                                           "failed to get link speed/duplex for %s\n",
-                                           slave->dev->name);
+                               if (net_ratelimit())
+                                       netdev_warn(bond->dev,
+                                                   "failed to get link speed/duplex for %s\n",
+                                                   slave->dev->name);
                                continue;
                        }
                        bond_set_slave_link_state(slave, BOND_LINK_UP,
index 5333601f855f88529c04e003eae5e3d19aa59f6d..dc3052751bc13ed2248c218de01849d865dbe952 100644 (file)
@@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                        p = (char *)&dev->stats;
                else
                        p = (char *)priv;
+
+               if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
+                       continue;
+
                p += s->stat_offset;
                data[j] = *(unsigned long *)p;
                j++;
index ef4be781fd054696edac7c494cf713f0dd5f3d4e..09ea62ee96d38b3d99bb48cbd0f72dcc715f8315 100644 (file)
@@ -529,6 +529,7 @@ enum {                                 /* adapter flags */
        USING_SOFT_PARAMS  = (1 << 6),
        MASTER_PF          = (1 << 7),
        FW_OFLD_CONN       = (1 << 9),
+       ROOT_NO_RELAXED_ORDERING = (1 << 10),
 };
 
 enum {
index e403fa18f1b15e570748b2136ae1bf8b54d14afb..33bb8678833adc6f83551d992f201f6314e68762 100644 (file)
@@ -4654,11 +4654,6 @@ static void print_port_info(const struct net_device *dev)
                    dev->name, adap->params.vpd.id, adap->name, buf);
 }
 
-static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
-{
-       pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
-}
-
 /*
  * Free the following resources:
  * - memory used for tables
@@ -4908,7 +4903,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        pci_enable_pcie_error_reporting(pdev);
-       enable_pcie_relaxed_ordering(pdev);
        pci_set_master(pdev);
        pci_save_state(pdev);
 
@@ -4947,6 +4941,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->msg_enable = DFLT_MSG_ENABLE;
        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
+       /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+        * Ingress Packet Data to Free List Buffers in order to allow for
+        * chipset performance optimizations between the Root Complex and
+        * Memory Controllers.  (Messages to the associated Ingress Queue
+        * notifying new Packet Placement in the Free List Buffers will be
+        * sent without the Relaxed Ordering Attribute, thus guaranteeing
+        * that all preceding PCIe Transaction Layer Packets will be
+        * processed first.)  But some Root Complexes have various issues
+        * with Upstream Transaction Layer Packets that have the Relaxed
+        * Ordering Attribute set.  PCIe devices below such Root Complexes
+        * have the Relaxed Ordering bit cleared in their configuration
+        * space, so we check our PCIe configuration space to see if it's
+        * flagged with advice against using Relaxed Ordering.
+        */
+       if (!pcie_relaxed_ordering_enabled(pdev))
+               adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->tid_release_lock);
        spin_lock_init(&adapter->win0_lock);
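
The new ROOT_NO_RELAXED_ORDERING flag set above is consumed in the queue-allocation path (see the t4_sge_alloc_rxq() hunk below), where it collapses to a single `relaxed` bit selecting the FETCHRO/DATARO attributes. A sketch of that gating; FL0FETCHRO/FL0DATARO here are illustrative macros with placeholder bit positions, not the firmware's real field encodings.

#include <stdio.h>

#define ROOT_NO_RELAXED_ORDERING (1u << 10)

#define FL0FETCHRO(x)   ((x) << 0)      /* placeholder bit positions */
#define FL0DATARO(x)    ((x) << 1)

static unsigned int fl_flags(unsigned int adapter_flags)
{
        int relaxed = !(adapter_flags & ROOT_NO_RELAXED_ORDERING);

        return FL0FETCHRO(relaxed) | FL0DATARO(relaxed);
}

int main(void)
{
        printf("RO usable: %#x\n", fl_flags(0));
        printf("RO off:    %#x\n", fl_flags(ROOT_NO_RELAXED_ORDERING));
        return 0;
}
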
index ede12209f20be4573a5464de9e7f7a0da0088c31..4ef68f69b58c45322d65f414e06d068ade4ab22d 100644 (file)
@@ -2719,6 +2719,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
        struct fw_iq_cmd c;
        struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);
+       int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
 
        /* Size needs to be multiple of 16, including status entry. */
        iq->size = roundup(iq->size, 16);
@@ -2772,8 +2773,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 
                flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
                c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
-                                            FW_IQ_CMD_FL0FETCHRO_F |
-                                            FW_IQ_CMD_FL0DATARO_F |
+                                            FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+                                            FW_IQ_CMD_FL0DATARO_V(relaxed) |
                                             FW_IQ_CMD_FL0PADEN_F);
                if (cong >= 0)
                        c.iqns_to_fl0congen |=
index 109bc630408b65b1dc67cedbaab00ce0601d2574..08c6ddb84a049ecc75f414a968dba78974d5837f 100644 (file)
@@ -408,6 +408,7 @@ enum { /* adapter flags */
        USING_MSI          = (1UL << 1),
        USING_MSIX         = (1UL << 2),
        QUEUES_BOUND       = (1UL << 3),
+       ROOT_NO_RELAXED_ORDERING = (1UL << 4),
 };
 
 /*
index ac7a150c54e9b4e93b751821eb09d80669eecc69..2b85b874fd0d2c96fadb4888b0bd4695628bf611 100644 (file)
@@ -2888,6 +2888,24 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
         */
        adapter->name = pci_name(pdev);
        adapter->msg_enable = DFLT_MSG_ENABLE;
+
+       /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+        * Ingress Packet Data to Free List Buffers in order to allow for
+        * chipset performance optimizations between the Root Complex and
+        * Memory Controllers.  (Messages to the associated Ingress Queue
+        * notifying new Packet Placement in the Free List Buffers will be
+        * sent without the Relaxed Ordering Attribute, thus guaranteeing
+        * that all preceding PCIe Transaction Layer Packets will be
+        * processed first.)  But some Root Complexes have various issues
+        * with Upstream Transaction Layer Packets that have the Relaxed
+        * Ordering Attribute set.  PCIe devices below such Root Complexes
+        * have the Relaxed Ordering bit cleared in their configuration
+        * space, so we check our PCIe configuration space to see if it's
+        * flagged with advice against using Relaxed Ordering.
+        */
+       if (!pcie_relaxed_ordering_enabled(pdev))
+               adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
        err = adap_init0(adapter);
        if (err)
                goto err_unmap_bar;
index e37dde2ba97f6d529177d475be2a6d001071de0b..05498e7f284034d86a8f8531de95cf7f59ec4890 100644 (file)
@@ -2205,6 +2205,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
        struct port_info *pi = netdev_priv(dev);
        struct fw_iq_cmd cmd, rpl;
        int ret, iqandst, flsz = 0;
+       int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING);
 
        /*
         * If we're using MSI interrupts and we're not initializing the
@@ -2300,6 +2301,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                        cpu_to_be32(
                                FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
                                FW_IQ_CMD_FL0PACKEN_F |
+                               FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+                               FW_IQ_CMD_FL0DATARO_V(relaxed) |
                                FW_IQ_CMD_FL0PADEN_F);
 
                /* In T6, for egress queue type FL there is internal overhead
index dd7fa9cf225ff3c9e60d6f62e3f7f17317f2ea52..b0837b58c3a1084e8261ff3bf6d6057c4d8e7f54 100644 (file)
@@ -115,14 +115,10 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
                return;
        }
 
-       if (link) {
+       if (link)
                netif_carrier_on(netdev);
-               rtnl_lock();
-               dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
-               rtnl_unlock();
-       } else {
+       else
                netif_carrier_off(netdev);
-       }
        rcu_read_unlock();
 }
 
index c905971c5f3a2849262dcf3ce220de93e32e4bd2..990a63d7fcb7213fa5c5d6e40a10ec5102af892a 100644 (file)
@@ -938,7 +938,6 @@ enum efx_stats_action {
 static int efx_mcdi_mac_stats(struct efx_nic *efx,
                              enum efx_stats_action action, int clear)
 {
-       struct efx_ef10_nic_data *nic_data = efx->nic_data;
        MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
        int rc;
        int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -960,7 +959,12 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
                              MAC_STATS_IN_PERIODIC_NOEVENT, 1,
                              MAC_STATS_IN_PERIOD_MS, period);
        MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-       MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+       if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+               struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+               MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+       }
 
        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
                                NULL, 0, NULL);
index db157a47000c65fb588d3f1c5dfe21bbd20983fb..72ec711fcba242775feea3064bdcb69de6c1812e 100644 (file)
@@ -204,6 +204,7 @@ int stmmac_mdio_register(struct net_device *ndev)
        struct stmmac_priv *priv = netdev_priv(ndev);
        struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
        struct device_node *mdio_node = priv->plat->mdio_node;
+       struct device *dev = ndev->dev.parent;
        int addr, found;
 
        if (!mdio_bus_data)
@@ -237,7 +238,7 @@ int stmmac_mdio_register(struct net_device *ndev)
        else
                err = mdiobus_register(new_bus);
        if (err != 0) {
-               netdev_err(ndev, "Cannot register the MDIO bus\n");
+               dev_err(dev, "Cannot register the MDIO bus\n");
                goto bus_register_fail;
        }
 
@@ -285,14 +286,12 @@ int stmmac_mdio_register(struct net_device *ndev)
                        irq_str = irq_num;
                        break;
                }
-               netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
-                           phydev->phy_id, addr, irq_str, phydev_name(phydev),
-                           act ? " active" : "");
+               phy_attached_info(phydev);
                found = 1;
        }
 
        if (!found && !mdio_node) {
-               netdev_warn(ndev, "No PHY found\n");
+               dev_warn(dev, "No PHY found\n");
                mdiobus_unregister(new_bus);
                mdiobus_free(new_bus);
                return -ENODEV;
index de8156c6b2925741534a45a6c3a28a3afe9d1ad6..2bbda71818adb022853964dd6d51a14c26f7cd19 100644 (file)
@@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
        if (data[IFLA_GENEVE_ID]) {
                __u32 vni =  nla_get_u32(data[IFLA_GENEVE_ID]);
 
-               if (vni >= GENEVE_VID_MASK)
+               if (vni >= GENEVE_N_VID)
                        return -ERANGE;
        }
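
Worked numbers for the VNI check above, assuming the conventional definitions GENEVE_N_VID = 1 << 24 and GENEVE_VID_MASK = GENEVE_N_VID - 1: a VNI is 24 bits wide, so 0xffffff is the largest legal value, and the old `>= GENEVE_VID_MASK` comparison wrongly rejected it.

#include <stdio.h>

#define GENEVE_N_VID    (1u << 24)
#define GENEVE_VID_MASK (GENEVE_N_VID - 1)

int main(void)
{
        unsigned int vni = 0xffffff;    /* maximum 24-bit VNI */

        printf("old check rejects it: %d\n", vni >= GENEVE_VID_MASK);  /* 1 */
        printf("new check rejects it: %d\n", vni >= GENEVE_N_VID);     /* 0 */
        return 0;
}
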
 
index d21258d277ce4bca965e588e537df507d4e23892..f1b60740e02080261d998686a28e27c9ee4ea8a2 100644 (file)
@@ -159,8 +159,10 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
 
        brcmf_feat_firmware_capabilities(ifp);
        memset(&gscan_cfg, 0, sizeof(gscan_cfg));
-       brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg",
-                                 &gscan_cfg, sizeof(gscan_cfg));
+       if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID)
+               brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
+                                         "pfn_gscan_cfg",
+                                         &gscan_cfg, sizeof(gscan_cfg));
        brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
        if (drvr->bus_if->wowl_supported)
                brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
index b4ecd1fe137489617ab477608e0152712db0f784..97208ce19f927685ce79b2b2321ee5b3086f7a3c 100644 (file)
@@ -154,7 +154,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 const struct iwl_cfg iwl9160_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9160",
        .fw_name_pre = IWL9260A_FW_PRE,
-       .fw_name_pre_next_step = IWL9260B_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
        IWL_DEVICE_9000,
        .ht_params = &iwl9000_ht_params,
        .nvm_ver = IWL9000_NVM_VERSION,
@@ -165,7 +165,7 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
 const struct iwl_cfg iwl9260_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9260",
        .fw_name_pre = IWL9260A_FW_PRE,
-       .fw_name_pre_next_step = IWL9260B_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
        IWL_DEVICE_9000,
        .ht_params = &iwl9000_ht_params,
        .nvm_ver = IWL9000_NVM_VERSION,
@@ -176,7 +176,7 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
 const struct iwl_cfg iwl9270_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9270",
        .fw_name_pre = IWL9260A_FW_PRE,
-       .fw_name_pre_next_step = IWL9260B_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
        IWL_DEVICE_9000,
        .ht_params = &iwl9000_ht_params,
        .nvm_ver = IWL9000_NVM_VERSION,
@@ -186,8 +186,8 @@ const struct iwl_cfg iwl9270_2ac_cfg = {
 
 const struct iwl_cfg iwl9460_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9460",
-       .fw_name_pre = IWL9000_FW_PRE,
-       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       .fw_name_pre = IWL9260A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
        IWL_DEVICE_9000,
        .ht_params = &iwl9000_ht_params,
        .nvm_ver = IWL9000_NVM_VERSION,
@@ -198,8 +198,8 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
 
 const struct iwl_cfg iwl9560_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9560",
-       .fw_name_pre = IWL9000_FW_PRE,
-       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       .fw_name_pre = IWL9260A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
        IWL_DEVICE_9000,
        .ht_params = &iwl9000_ht_params,
        .nvm_ver = IWL9000_NVM_VERSION,
index 0fa8c473f1e2738b8490e2555af1ae95319d96dd..c73a6438ce8fbcd12a8e12e359c5f7d25e66a76f 100644 (file)
@@ -328,6 +328,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
  *     command size (command version 4) that supports toggling ACK TX
  *     power reduction.
+ * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
  *
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
@@ -373,6 +374,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG        = (__force iwl_ucode_tlv_capa_t)80,
        IWL_UCODE_TLV_CAPA_LQM_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)81,
        IWL_UCODE_TLV_CAPA_TX_POWER_ACK                 = (__force iwl_ucode_tlv_capa_t)84,
+       IWL_UCODE_TLV_CAPA_MLME_OFFLOAD                 = (__force iwl_ucode_tlv_capa_t)96,
 
        NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
index c52623cb7c2a1bc89d635f4406df70bc3101f340..d19c74827fbb6094147ab48fc266cd6dbbb15e28 100644 (file)
@@ -276,10 +276,10 @@ struct iwl_pwr_tx_backoff {
  * @fw_name_pre: Firmware filename prefix. The api version and extension
  *     (.ucode) will be added to filename before loading from disk. The
  *     filename is constructed as fw_name_pre<api>.ucode.
- * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+ * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
  *     (if supported)
- * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next
- *     step. Supported only in integrated solutions.
+ * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
+ *     next step. Supported only in integrated solutions.
  * @ucode_api_max: Highest version of uCode API supported by driver.
  * @ucode_api_min: Lowest version of uCode API supported by driver.
  * @max_inst_size: The maximal length of the fw inst section
@@ -330,7 +330,7 @@ struct iwl_cfg {
        /* params specific to an individual device within a device family */
        const char *name;
        const char *fw_name_pre;
-       const char *fw_name_pre_next_step;
+       const char *fw_name_pre_b_or_c_step;
        const char *fw_name_pre_rf_next_step;
        /* params not likely to change within a device family */
        const struct iwl_base_params *base_params;
index 6fdb5921e17f41846db6a4c7302d9248247d1c03..4e0f86fe0a6f0874a96d41ae257c9621a2f035ee 100644 (file)
@@ -216,8 +216,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
        const char *fw_pre_name;
 
        if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
-           CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
-               fw_pre_name = cfg->fw_name_pre_next_step;
+           (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
+            CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
+               fw_pre_name = cfg->fw_name_pre_b_or_c_step;
        else if (drv->trans->cfg->integrated &&
                 CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
                 cfg->fw_name_pre_rf_next_step)
index 5c08f4d40f6ac78fbdeb575e152742c9c0a3cef5..3ee6767392b61151efc774610223e2f6216d22f3 100644 (file)
@@ -785,7 +785,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                       int num_of_ch, __le32 *channels, u16 fw_mcc)
 {
        int ch_idx;
-       u16 ch_flags, prev_ch_flags = 0;
+       u16 ch_flags;
+       u32 reg_rule_flags, prev_reg_rule_flags = 0;
        const u8 *nvm_chan = cfg->ext_nvm ?
                             iwl_ext_nvm_channels : iwl_nvm_channels;
        struct ieee80211_regdomain *regd;
@@ -834,8 +835,11 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                        continue;
                }
 
+               reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+                                                            ch_flags, cfg);
+
                /* we can't continue the same rule */
-               if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+               if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
                    center_freq - prev_center_freq > 20) {
                        valid_rules++;
                        new_rule = true;
@@ -854,18 +858,17 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                rule->power_rule.max_eirp =
                        DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
 
-               rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
-                                                         ch_flags, cfg);
+               rule->flags = reg_rule_flags;
 
                /* rely on auto-calculation to merge BW of contiguous chans */
                rule->flags |= NL80211_RRF_AUTO_BW;
                rule->freq_range.max_bandwidth_khz = 0;
 
-               prev_ch_flags = ch_flags;
                prev_center_freq = center_freq;
+               prev_reg_rule_flags = reg_rule_flags;
 
                IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-                             "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
+                             "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n",
                              center_freq,
                              band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
                              CHECK_AND_PRINT_I(VALID),
@@ -877,10 +880,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                              CHECK_AND_PRINT_I(160MHZ),
                              CHECK_AND_PRINT_I(INDOOR_ONLY),
                              CHECK_AND_PRINT_I(GO_CONCURRENT),
-                             ch_flags,
+                             ch_flags, reg_rule_flags,
                              ((ch_flags & NVM_CHANNEL_ACTIVE) &&
                               !(ch_flags & NVM_CHANNEL_RADAR))
-                                        ? "" : "not ");
+                                        ? "Ad-Hoc" : "");
        }
 
        regd->n_reg_rules = valid_rules;
index 79e7a7a285dc960597e945ce7b80f96e729a86ad..82863e9273eb66ce9058a5974d2f35988290e1dc 100644 (file)
@@ -1275,8 +1275,10 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
 
                        entry = &wifi_pkg->package.elements[idx++];
                        if ((entry->type != ACPI_TYPE_INTEGER) ||
-                           (entry->integer.value > U8_MAX))
-                               return -EINVAL;
+                           (entry->integer.value > U8_MAX)) {
+                               ret = -EINVAL;
+                               goto out_free;
+                       }
 
                        mvm->geo_profiles[i].values[j] = entry->integer.value;
                }
index c7b1e58e33847a8250ab708695373a0817f85874..ce901be5fba87e3674f06b030cfae8759e063c3b 100644 (file)
@@ -2597,8 +2597,18 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
        spin_lock_bh(&mvm_sta->lock);
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                tid_data = &mvm_sta->tid_data[i];
-               while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+
+               while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+                       /*
+                        * The first deferred frame should've stopped the MAC
+                        * queues, so we should never get a second deferred
+                        * frame for the RA/TID.
+                        */
+                       iwl_mvm_start_mac_queues(mvm, info->hw_queue);
                        ieee80211_free_txskb(mvm->hw, skb);
+               }
        }
        spin_unlock_bh(&mvm_sta->lock);
 }
index 65beca3a457a19d2dcd804d01a590f7404e04f65..8999a1199d60d27bdec88d6c638feb85403c3ced 100644 (file)
@@ -1291,7 +1291,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
         * first index into rate scale table.
         */
        if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-               rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+               rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
                                    info->status.ampdu_len,
                                    info->status.ampdu_ack_len,
                                    reduced_txp);
@@ -1312,7 +1312,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                if (info->status.ampdu_ack_len == 0)
                        info->status.ampdu_len = 1;
 
-               rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+               rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
                                    info->status.ampdu_len,
                                    info->status.ampdu_ack_len);
 
@@ -1348,11 +1348,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                                continue;
 
                        rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
-                                           lq_rate.index, 1,
+                                           tx_resp_rate.index, 1,
                                            i < retries ? 0 : legacy_success,
                                            reduced_txp);
                        rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
-                                           lq_rate.index, 1,
+                                           tx_resp_rate.index, 1,
                                            i < retries ? 0 : legacy_success);
                }
 
index f3e608196369a3885804b42331f442a16623e388..71c8b800ffa99874bd4120e59e2aa7f51212d2da 100644 (file)
@@ -636,9 +636,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 
        baid_data = rcu_dereference(mvm->baid_map[baid]);
        if (!baid_data) {
-               WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
-                    "Received baid %d, but no data exists for this BAID\n",
-                    baid);
+               IWL_DEBUG_RX(mvm,
+                            "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+                             baid, reorder);
                return false;
        }
 
@@ -759,7 +759,9 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
 
        data = rcu_dereference(mvm->baid_map[baid]);
        if (!data) {
-               WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
+               IWL_DEBUG_RX(mvm,
+                            "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+                             baid, reorder_data);
                goto out;
        }
 
index ab66b4394dfc8ca2afc0cf1321f93f28f186d1cb..027ee5e72172c85f9eaa98f1fd1a26489ab74820 100644 (file)
@@ -121,7 +121,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
-                                                STA_FLG_MIMO_EN_MSK),
+                                                STA_FLG_MIMO_EN_MSK |
+                                                STA_FLG_RTS_MIMO_PROT),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
@@ -290,8 +291,8 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data)
                goto unlock;
 
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-       ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
-                                         sta->addr, ba_data->tid);
+       ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+                                     sta->addr, ba_data->tid);
 unlock:
        rcu_read_unlock();
 }
index 60360ed73f26165c890d0af3bac742b5669fb8d9..5fcc9dd6be56de52fa0a063969cf58011b8eb3d8 100644 (file)
@@ -185,8 +185,14 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
        else
                udp_hdr(skb)->check = 0;
 
-       /* mac header len should include IV, size is in words */
-       if (info->control.hw_key)
+       /*
+        * The MAC header length should include the IV unless the IV is
+        * added by the firmware, as it is for WEP.  In the new Tx API,
+        * the IV is always added by the firmware.  (The length here is
+        * counted in words.)
+        */
+       if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+           info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+           info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
                mh_len += info->control.hw_key->iv_len;
        mh_len /= 2;
        offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
@@ -1815,6 +1821,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
        struct iwl_mvm_tid_data *tid_data;
        struct iwl_mvm_sta *mvmsta;
 
+       ba_info.flags = IEEE80211_TX_STAT_AMPDU;
+
        if (iwl_mvm_has_new_tx_api(mvm)) {
                struct iwl_mvm_compressed_ba_notif *ba_res =
                        (void *)pkt->data;
index f16c1bb9bf94b6bef6bfb6ce2ff36c3faf771c00..84f4ba01e14fa2e84878dc75fda5d050e3500880 100644 (file)
@@ -510,9 +510,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 
 /* 9000 Series */
        {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
@@ -527,10 +535,22 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
index c49f1f8b2e57459deb2605f242c9324e8960a95b..37046ac2c4413a51b9a77864cdf9e434a6ac7bd6 100644 (file)
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
 
        c.directive.opcode = nvme_admin_directive_recv;
        c.directive.nsid = cpu_to_le32(nsid);
-       c.directive.numd = cpu_to_le32(sizeof(*s));
+       c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
        c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
        c.directive.dtype = NVME_DIR_STREAMS;
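The directive receive length field (numd) is a zero-based count of dwords, so the fix converts the byte size to dwords and subtracts one; the old code passed the raw byte count. A minimal sketch of that arithmetic (the helper name and the 32-byte example size are assumptions for illustration, not driver code):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* NVMe NUMD fields are zero-based dword counts: 0 means one dword. */
    static uint32_t bytes_to_numd(size_t nbytes)
    {
            assert(nbytes >= 4 && nbytes % 4 == 0);
            return (uint32_t)(nbytes >> 2) - 1;
    }

    int main(void)
    {
            /* e.g. a 32-byte parameter structure is 8 dwords -> NUMD 7 */
            printf("numd = %u\n", bytes_to_numd(32));
            return 0;
    }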
 
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
        blk_queue_write_cache(q, vwc, vwc);
 }
 
-static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
 {
        /*
         * APST (Autonomous Power State Transition) lets us program a
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
         * then don't do anything.
         */
        if (!ctrl->apsta)
-               return;
+               return 0;
 
        if (ctrl->npss > 31) {
                dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
-               return;
+               return 0;
        }
 
        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
-               return;
+               return 0;
 
        if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
                /* Turn off APST. */
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
 
        kfree(table);
+       return ret;
 }
 
 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                 * In fabrics we need to verify the cntlid matches the
                 * admin connect
                 */
-               if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+               if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
                        ret = -EINVAL;
+                       goto out_free;
+               }
 
                if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
                        dev_err(ctrl->device,
                                "keep-alive support is mandatory for fabrics\n");
                        ret = -EINVAL;
+                       goto out_free;
                }
        } else {
                ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        else if (!ctrl->apst_enabled && prev_apst_enabled)
                dev_pm_qos_hide_latency_tolerance(ctrl->device);
 
-       nvme_configure_apst(ctrl);
-       nvme_configure_directives(ctrl);
+       ret = nvme_configure_apst(ctrl);
+       if (ret < 0)
+               return ret;
+
+       ret = nvme_configure_directives(ctrl);
+       if (ret < 0)
+               return ret;
 
        ctrl->identified = true;
 
+       return 0;
+
+out_free:
+       kfree(id);
        return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_init_identify);
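The cntlid and keep-alive error paths above previously returned directly and leaked the identify buffer; they now jump to out_free. A simplified stand-in showing the generic shape of the fix (not the nvme driver itself):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int init_identify_like(int bad_cntlid)
    {
            char *id = malloc(4096);    /* stands in for the identify page */
            int ret = 0;

            if (!id)
                    return -ENOMEM;

            if (bad_cntlid) {
                    ret = -EINVAL;
                    goto out_free;      /* was: return -EINVAL, leaking id */
            }

            /* ... consume id ... */

    out_free:
            free(id);
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", init_identify_like(0), init_identify_like(1));
            return 0;
    }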
@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
        if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
                return sprintf(buf, "eui.%8phN\n", ns->eui);
 
-       while (ctrl->serial[serial_len - 1] == ' ')
+       while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
+                                 ctrl->serial[serial_len - 1] == '\0'))
                serial_len--;
-       while (ctrl->model[model_len - 1] == ' ')
+       while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
+                                ctrl->model[model_len - 1] == '\0'))
                model_len--;
 
        return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
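The old loops underflowed when a controller reported a serial or model field that was entirely spaces or NUL padding. A small standalone sketch of the bounded right-trim now used (the field contents are invented; the fields are fixed-width and not NUL-terminated, as in the driver):

    #include <stdio.h>

    static int trimmed_len(const char *field, int len)
    {
            /* stop at index 0 even if the whole field is padding */
            while (len > 0 && (field[len - 1] == ' ' || field[len - 1] == '\0'))
                    len--;
            return len;
    }

    int main(void)
    {
            char serial[8] = "SN42    ";    /* space padded, no NUL */
            char bogus[8]  = "        ";    /* all spaces: old loop ran past 0 */

            printf("%d %d\n", trimmed_len(serial, 8), trimmed_len(bogus, 8));
            return 0;
    }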
index 2e582a2409437bca7d603598290d023bd988c577..5f5cd306f76d05e8cb7351cb91403d7f9a554d9a 100644 (file)
@@ -794,7 +794,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
                int i;
 
                for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
-                       if (opt_tokens[i].token & ~allowed_opts) {
+                       if ((opt_tokens[i].token & opts->mask) &&
+                           (opt_tokens[i].token & ~allowed_opts)) {
                                pr_warn("invalid parameter '%s'\n",
                                        opt_tokens[i].pattern);
                        }
index cd888a47d0fccb728b155f75aa85f0836ec4eac3..925467b31a333940dc62d62c4c5f3376316e4120 100644 (file)
@@ -801,6 +801,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
                return;
        }
 
+       nvmeq->cqe_seen = 1;
        req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
        nvme_end_request(req, cqe->status, cqe->result);
 }
@@ -830,10 +831,8 @@ static void nvme_process_cq(struct nvme_queue *nvmeq)
                consumed++;
        }
 
-       if (consumed) {
+       if (consumed)
                nvme_ring_cq_doorbell(nvmeq);
-               nvmeq->cqe_seen = 1;
-       }
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
@@ -1558,11 +1557,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
        if (dev->cmb) {
                iounmap(dev->cmb);
                dev->cmb = NULL;
-               if (dev->cmbsz) {
-                       sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
-                                                    &dev_attr_cmb.attr, NULL);
-                       dev->cmbsz = 0;
-               }
+               sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+                                            &dev_attr_cmb.attr, NULL);
+               dev->cmbsz = 0;
        }
 }
 
@@ -1953,16 +1950,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
        /*
         * CMBs can currently only exist on >=1.2 PCIe devices. We only
-        * populate sysfs if a CMB is implemented. Note that we add the
-        * CMB attribute to the nvme_ctrl kobj which removes the need to remove
-        * it on exit. Since nvme_dev_attrs_group has no name we can pass
-        * NULL as final argument to sysfs_add_file_to_group.
+        * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
+        * has no name we can pass NULL as final argument to
+        * sysfs_add_file_to_group.
         */
 
        if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
                dev->cmb = nvme_map_cmb(dev);
-
-               if (dev->cmbsz) {
+               if (dev->cmb) {
                        if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
                                                    &dev_attr_cmb.attr, NULL))
                                dev_warn(dev->ctrl.device,
index 2d7a98ab53fbf2de131990b753b929fe31cd154b..a53bb6635b8378d00f7fb4d367a16170d81a846e 100644 (file)
@@ -199,12 +199,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
        copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
        copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
 
-       memset(id->mn, ' ', sizeof(id->mn));
-       strncpy((char *)id->mn, "Linux", sizeof(id->mn));
-
-       memset(id->fr, ' ', sizeof(id->fr));
-       strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
-
        id->rab = 6;
 
        /*
index 31ca55dfcb1d49f3a1d88f7c6f5d0e7f0ee1e1ea..309c84aa7595b9b2ffab72c8a632c97f29def6d1 100644 (file)
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
        struct kref                     ref;
 };
 
+struct nvmet_fc_defer_fcp_req {
+       struct list_head                req_list;
+       struct nvmefc_tgt_fcp_req       *fcp_req;
+};
+
 struct nvmet_fc_tgt_queue {
        bool                            ninetypercent;
        u16                             qid;
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
        struct nvmet_fc_tgt_assoc       *assoc;
        struct nvmet_fc_fcp_iod         *fod;           /* array of fcp_iods */
        struct list_head                fod_list;
+       struct list_head                pending_cmd_list;
+       struct list_head                avail_defer_list;
        struct workqueue_struct         *work_q;
        struct kref                     ref;
 } __aligned(sizeof(unsigned long long));
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+                                       struct nvmet_fc_fcp_iod *fod);
 
 
 /* *********************** FC-NVME DMA Handling **************************** */
@@ -385,7 +394,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
 static struct nvmet_fc_ls_iod *
 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
 {
-       static struct nvmet_fc_ls_iod *iod;
+       struct nvmet_fc_ls_iod *iod;
        unsigned long flags;
 
        spin_lock_irqsave(&tgtport->lock, flags);
@@ -462,10 +471,10 @@ nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
 static struct nvmet_fc_fcp_iod *
 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 {
-       static struct nvmet_fc_fcp_iod *fod;
-       unsigned long flags;
+       struct nvmet_fc_fcp_iod *fod;
+
+       lockdep_assert_held(&queue->qlock);
 
-       spin_lock_irqsave(&queue->qlock, flags);
        fod = list_first_entry_or_null(&queue->fod_list,
                                        struct nvmet_fc_fcp_iod, fcp_list);
        if (fod) {
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
                 * will "inherit" that reference.
                 */
        }
-       spin_unlock_irqrestore(&queue->qlock, flags);
        return fod;
 }
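Dropping the stray `static` in the two allocators above fixes a subtle bug: a static local pointer is a single slot shared by every caller, not a per-call temporary. A hypothetical stand-in showing the shape of the problem (not the driver code):

    #include <stdio.h>

    static int *pick_slot(int *pool, int idx)
    {
            static int *slot;   /* BUG: one slot shared by all callers/CPUs */

            slot = &pool[idx];
            /* a concurrent caller entering here clobbers `slot` before
             * the first caller dereferences it */
            return slot;
    }

    int main(void)
    {
            int pool[2] = { 10, 20 };

            printf("%d %d\n", *pick_slot(pool, 0), *pick_slot(pool, 1));
            return 0;
    }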
 
 
+static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+                      struct nvmet_fc_tgt_queue *queue,
+                      struct nvmefc_tgt_fcp_req *fcpreq)
+{
+       struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+       /*
+        * Put all admin commands on hw queue id 0. All I/O commands go
+        * to their respective hw queues on a modulo basis.
+        */
+       fcpreq->hwqid = queue->qid ?
+                       ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+       if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+               queue_work_on(queue->cpu, queue->work_q, &fod->work);
+       else
+               nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
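The queue-selection rule factored out above maps the admin queue (qid 0) to hardware queue 0 and spreads I/O queues round-robin. A tiny sketch of that mapping (the function name and the max_hw_queues value of 4 are assumptions):

    #include <stdio.h>

    static unsigned int qid_to_hwqid(unsigned int qid,
                                     unsigned int max_hw_queues)
    {
            return qid ? (qid - 1) % max_hw_queues : 0;
    }

    int main(void)
    {
            for (unsigned int qid = 0; qid <= 6; qid++)
                    printf("qid %u -> hwqid %u\n", qid,
                           qid_to_hwqid(qid, 4));
            return 0;
    }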
+
 static void
 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
                        struct nvmet_fc_fcp_iod *fod)
 {
        struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
        struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+       struct nvmet_fc_defer_fcp_req *deferfcp;
        unsigned long flags;
 
        fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 
        fcpreq->nvmet_fc_private = NULL;
 
-       spin_lock_irqsave(&queue->qlock, flags);
-       list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
        fod->active = false;
        fod->abort = false;
        fod->aborted = false;
        fod->writedataactive = false;
        fod->fcpreq = NULL;
+
+       tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+
+       spin_lock_irqsave(&queue->qlock, flags);
+       deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+                               struct nvmet_fc_defer_fcp_req, req_list);
+       if (!deferfcp) {
+               list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+               spin_unlock_irqrestore(&queue->qlock, flags);
+
+               /* Release reference taken at queue lookup and fod allocation */
+               nvmet_fc_tgt_q_put(queue);
+               return;
+       }
+
+       /* Re-use the fod for the next pending cmd that was deferred */
+       list_del(&deferfcp->req_list);
+
+       fcpreq = deferfcp->fcp_req;
+
+       /* deferfcp can be reused for another IO later */
+       list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
        spin_unlock_irqrestore(&queue->qlock, flags);
 
+       /* Save the NVME CMD IU in the fod */
+       memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+       /* Setup new fcpreq to be processed */
+       fcpreq->rspaddr = NULL;
+       fcpreq->rsplen  = 0;
+       fcpreq->nvmet_fc_private = fod;
+       fod->fcpreq = fcpreq;
+       fod->active = true;
+
+       /* inform LLDD IO is now being processed */
+       tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+       /* Submit deferred IO for processing */
+       nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
        /*
-        * release the reference taken at queue lookup and fod allocation
+        * Keep the queue lookup reference that was taken when
+        * the fod was originally allocated.
         */
-       nvmet_fc_tgt_q_put(queue);
-
-       tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 }
 
 static int
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
        queue->port = assoc->tgtport->port;
        queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
        INIT_LIST_HEAD(&queue->fod_list);
+       INIT_LIST_HEAD(&queue->avail_defer_list);
+       INIT_LIST_HEAD(&queue->pending_cmd_list);
        atomic_set(&queue->connected, 0);
        atomic_set(&queue->sqtail, 0);
        atomic_set(&queue->rsn, 1);
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 {
        struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
        struct nvmet_fc_fcp_iod *fod = queue->fod;
+       struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
        unsigned long flags;
        int i, writedataactive;
        bool disconnect;
@@ -666,6 +733,36 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
                        }
                }
        }
+
+       /* Clean up deferred IOs in the queue */
+       list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
+                               req_list) {
+               list_del(&deferfcp->req_list);
+               kfree(deferfcp);
+       }
+
+       for (;;) {
+               deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+                               struct nvmet_fc_defer_fcp_req, req_list);
+               if (!deferfcp)
+                       break;
+
+               list_del(&deferfcp->req_list);
+               spin_unlock_irqrestore(&queue->qlock, flags);
+
+               tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+                               deferfcp->fcp_req);
+
+               tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+                               deferfcp->fcp_req);
+
+               tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+                               deferfcp->fcp_req);
+
+               kfree(deferfcp);
+
+               spin_lock_irqsave(&queue->qlock, flags);
+       }
        spin_unlock_irqrestore(&queue->qlock, flags);
 
        flush_workqueue(queue->work_q);
@@ -2172,11 +2269,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
  * layer for processing.
  *
- * The nvmet-fc layer will copy cmd payload to an internal structure for
- * processing.  As such, upon completion of the routine, the LLDD may
- * immediately free/reuse the CMD IU buffer passed in the call.
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
  *
- * If this routine returns error, the lldd should abort the exchange.
+ * However, FC is packetized, and the API of the FC LLDD may issue a
+ * hw command to send the response but then not receive the hw
+ * completion for that command before a new command is asynchronously
+ * received. It is therefore possible for a command to be received
+ * before the LLDD and nvmet_fc have recycled the job structure, which
+ * gives the appearance of more commands received than fit in the sq.
+ * To alleviate this scenario, a temporary queue is maintained in the
+ * transport for pending LLDD requests waiting for a queue job
+ * structure. In these "overrun" cases, a temporary queue element is
+ * allocated, the LLDD request and CMD IU buffer information are
+ * remembered, and the routine returns a -EOVERFLOW status.
+ * Subsequently, when a queue job structure is freed, it is immediately
+ * reallocated for anything on the pending request list. The LLDD's
+ * defer_rcv() callback is called, informing the LLDD that it may reuse
+ * the CMD IU buffer, and the IO is then started normally with the
+ * transport.
+ *
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
  *
  * @target_port: pointer to the (registered) target port the FCP CMD IU
  *              was received on.
@@ -2194,6 +2318,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
        struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
        struct nvmet_fc_tgt_queue *queue;
        struct nvmet_fc_fcp_iod *fod;
+       struct nvmet_fc_defer_fcp_req *deferfcp;
+       unsigned long flags;
 
        /* validate iu, so the connection id can be used to find the queue */
        if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2214,29 +2340,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
         * when the fod is freed.
         */
 
+       spin_lock_irqsave(&queue->qlock, flags);
+
        fod = nvmet_fc_alloc_fcp_iod(queue);
-       if (!fod) {
+       if (fod) {
+               spin_unlock_irqrestore(&queue->qlock, flags);
+
+               fcpreq->nvmet_fc_private = fod;
+               fod->fcpreq = fcpreq;
+
+               memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+               nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+               return 0;
+       }
+
+       if (!tgtport->ops->defer_rcv) {
+               spin_unlock_irqrestore(&queue->qlock, flags);
                /* release the queue lookup reference */
                nvmet_fc_tgt_q_put(queue);
                return -ENOENT;
        }
 
-       fcpreq->nvmet_fc_private = fod;
-       fod->fcpreq = fcpreq;
-       /*
-        * put all admin cmds on hw queue id 0. All io commands go to
-        * the respective hw queue based on a modulo basis
-        */
-       fcpreq->hwqid = queue->qid ?
-                       ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
-       memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+       deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+                       struct nvmet_fc_defer_fcp_req, req_list);
+       if (deferfcp) {
+               /* Just re-use one that was previously allocated */
+               list_del(&deferfcp->req_list);
+       } else {
+               spin_unlock_irqrestore(&queue->qlock, flags);
 
-       if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
-               queue_work_on(queue->cpu, queue->work_q, &fod->work);
-       else
-               nvmet_fc_handle_fcp_rqst(tgtport, fod);
+               /* Now we need to dynamically allocate one */
+               deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+               if (!deferfcp) {
+                       /* release the queue lookup reference */
+                       nvmet_fc_tgt_q_put(queue);
+                       return -ENOMEM;
+               }
+               spin_lock_irqsave(&queue->qlock, flags);
+       }
 
-       return 0;
+       /* For now, use rspaddr / rsplen to save payload information */
+       fcpreq->rspaddr = cmdiubuf;
+       fcpreq->rsplen  = cmdiubuf_len;
+       deferfcp->fcp_req = fcpreq;
+
+       /* defer processing till a fod becomes available */
+       list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+       /* NOTE: the queue lookup reference is still valid */
+
+       spin_unlock_irqrestore(&queue->qlock, flags);
+
+       return -EOVERFLOW;
 }
 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
 
index 28c38c756f92858906ca2aee54a9c922522f9253..e0a28ea341fe95e84a866559dad8e317c8056401 100644 (file)
@@ -89,6 +89,7 @@ int of_dma_configure(struct device *dev, struct device_node *np)
        bool coherent;
        unsigned long offset;
        const struct iommu_ops *iommu;
+       u64 mask;
 
        /*
         * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
@@ -134,10 +135,9 @@ int of_dma_configure(struct device *dev, struct device_node *np)
         * Limit coherent and dma mask based on size and default mask
         * set by the driver.
         */
-       dev->coherent_dma_mask = min(dev->coherent_dma_mask,
-                                    DMA_BIT_MASK(ilog2(dma_addr + size)));
-       *dev->dma_mask = min((*dev->dma_mask),
-                            DMA_BIT_MASK(ilog2(dma_addr + size)));
+       mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
+       dev->coherent_dma_mask &= mask;
+       *dev->dma_mask &= mask;
 
        coherent = of_dma_is_coherent(np);
        dev_dbg(dev, "device is%sdma coherent\n",
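The old expression rounded ilog2() down on the exclusive limit, undersizing the mask whenever dma_addr + size was not a power of two (e.g. a 3 GiB range got a 31-bit mask). A worked re-derivation of the corrected bit count in plain C (the helper name is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* bits needed to address [0, end), end > 1; == ilog2(end - 1) + 1 */
    static unsigned int bits_for_range(uint64_t end)
    {
            unsigned int bits = 0;

            end--;                  /* highest addressable byte */
            while (end) {
                    bits++;
                    end >>= 1;
            }
            return bits;
    }

    int main(void)
    {
            printf("4 GiB -> %u bits\n", bits_for_range(1ULL << 32)); /* 32 */
            printf("3 GiB -> %u bits\n", bits_for_range(3ULL << 30)); /* 32 */
            return 0;
    }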
index 5c63b920b4713eaf41954c4e8f0d041472c78b6d..ed92c1254cff473113f9b24e25a598a72871891d 100644 (file)
@@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev)
 
        dino_dev->hba.dev = dev;
        dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
-       dino_dev->hba.lmmio_space_offset = 0;   /* CPU addrs == bus addrs */
+       dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
        spin_lock_init(&dino_dev->dinosaur_pen);
        dino_dev->hba.iommu = ccio_get_iommu(dev);
 
index af0cc3456dc1b48b1325c06c5edd2ca8cc22a640..da5570cf5c6a4eddf115da1e18cfd6c1643c88e2 100644 (file)
@@ -522,10 +522,11 @@ struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
                bridge = pci_upstream_bridge(bridge);
        }
 
-       if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
-               return NULL;
+       if (highest_pcie_bridge &&
+           pci_pcie_type(highest_pcie_bridge) == PCI_EXP_TYPE_ROOT_PORT)
+               return highest_pcie_bridge;
 
-       return highest_pcie_bridge;
+       return NULL;
 }
 EXPORT_SYMBOL(pci_find_pcie_root_port);
 
@@ -4259,6 +4260,41 @@ int pci_reset_function(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_reset_function);
 
+/**
+ * pci_reset_function_locked - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device.  The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device.  This function differs
+ * from __pci_reset_function() in that it saves and restores device state
+ * over the reset.  It also differs from pci_reset_function() in that it
+ * requires the PCI device lock to be held.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function_locked(struct pci_dev *dev)
+{
+       int rc;
+
+       rc = pci_probe_reset_function(dev);
+       if (rc)
+               return rc;
+
+       pci_dev_save_and_disable(dev);
+
+       rc = __pci_reset_function_locked(dev);
+
+       pci_dev_restore(dev);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function_locked);
+
 /**
  * pci_try_reset_function - quiesce and reset a PCI device function
  * @dev: PCI device to reset
index c31310db04047367c44b96ff6afa41a6b991bc28..e6a917b4acd3f3c63dff5d57b905ab093d7d0487 100644 (file)
@@ -1762,6 +1762,48 @@ static void pci_configure_extended_tags(struct pci_dev *dev)
                                         PCI_EXP_DEVCTL_EXT_TAG);
 }
 
+/**
+ * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
+ * @dev: PCI device to query
+ *
+ * Returns true if the device has enabled relaxed ordering attribute.
+ */
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
+{
+       u16 v;
+
+       pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
+
+       return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
+}
+EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
+
+static void pci_configure_relaxed_ordering(struct pci_dev *dev)
+{
+       struct pci_dev *root;
+
+       /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
+       if (dev->is_virtfn)
+               return;
+
+       if (!pcie_relaxed_ordering_enabled(dev))
+               return;
+
+       /*
+        * For now, we only deal with Relaxed Ordering issues with Root
+        * Ports. Peer-to-Peer DMA is another can of worms.
+        */
+       root = pci_find_pcie_root_port(dev);
+       if (!root)
+               return;
+
+       if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
+               pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+                                          PCI_EXP_DEVCTL_RELAX_EN);
+               dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
+       }
+}
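The new helper reads the Relaxed Ordering enable bit out of the PCIe Device Control register. A userspace illustration of the same bit test on a raw register value (the sample values are made up; the bit definition matches PCI_EXP_DEVCTL_RELAX_EN):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PCI_EXP_DEVCTL_RELAX_EN 0x0010  /* Enable Relaxed Ordering */

    static bool relaxed_ordering_enabled(uint16_t devctl)
    {
            return devctl & PCI_EXP_DEVCTL_RELAX_EN;
    }

    int main(void)
    {
            printf("%d %d\n", relaxed_ordering_enabled(0x2810),
                   relaxed_ordering_enabled(0x2800));
            return 0;
    }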
+
 static void pci_configure_device(struct pci_dev *dev)
 {
        struct hotplug_params hpp;
@@ -1769,6 +1811,7 @@ static void pci_configure_device(struct pci_dev *dev)
 
        pci_configure_mps(dev);
        pci_configure_extended_tags(dev);
+       pci_configure_relaxed_ordering(dev);
 
        memset(&hpp, 0, sizeof(hpp));
        ret = pci_get_hp_params(dev, &hpp);
index 6967c6b4cf6b017170619c285ee58feaa5975a5a..140760403f36a932b7d39eb73ae3eb96d76cab37 100644 (file)
@@ -4015,6 +4015,95 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
 DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
                              quirk_tw686x_class);
 
+/*
+ * Some devices have problems with Transaction Layer Packets with the Relaxed
+ * Ordering Attribute set.  Such devices should mark themselves, and other
+ * device drivers should check the flag before sending TLPs with RO set.
+ */
+static void quirk_relaxedordering_disable(struct pci_dev *dev)
+{
+       dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
+       dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
+}
+
+/*
+ * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
+ * Complex has a Flow Control Credit issue which can cause performance
+ * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+
+/*
+ * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
+ * where Upstream Transaction Layer Packets with the Relaxed Ordering
+ * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
+ * set.  This is a violation of the PCIe 3.0 Transaction Ordering Rules
+ * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0
+ * November 10, 2010).  As a result, on this platform we can't use Relaxed
+ * Ordering for Upstream TLPs.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
+                             quirk_relaxedordering_disable);
+
 /*
  * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
  * values for the Attribute as were supplied in the header of the
index b0c68d24db011ba9465dd31a41c05acd4f13c283..da5bdbdcce527262489cae939761b9cf1834eeb0 100644 (file)
@@ -3351,6 +3351,16 @@ static void ipr_worker_thread(struct work_struct *work)
                return;
        }
 
+       if (ioa_cfg->scsi_unblock) {
+               ioa_cfg->scsi_unblock = 0;
+               ioa_cfg->scsi_blocked = 0;
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               scsi_unblock_requests(ioa_cfg->host);
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+               if (ioa_cfg->scsi_blocked)
+                       scsi_block_requests(ioa_cfg->host);
+       }
+
        if (!ioa_cfg->scan_enabled) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                return;
@@ -7211,9 +7221,8 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
        ENTER;
        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
                ipr_trace;
-               spin_unlock_irq(ioa_cfg->host->host_lock);
-               scsi_unblock_requests(ioa_cfg->host);
-               spin_lock_irq(ioa_cfg->host->host_lock);
+               ioa_cfg->scsi_unblock = 1;
+               schedule_work(&ioa_cfg->work_q);
        }
 
        ioa_cfg->in_reset_reload = 0;
@@ -7287,13 +7296,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        wake_up_all(&ioa_cfg->reset_wait_q);
 
-       spin_unlock(ioa_cfg->host->host_lock);
-       scsi_unblock_requests(ioa_cfg->host);
-       spin_lock(ioa_cfg->host->host_lock);
-
-       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
-               scsi_block_requests(ioa_cfg->host);
-
+       ioa_cfg->scsi_unblock = 1;
        schedule_work(&ioa_cfg->work_q);
        LEAVE;
        return IPR_RC_JOB_RETURN;
@@ -9249,8 +9252,11 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();
-       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+               ioa_cfg->scsi_unblock = 0;
+               ioa_cfg->scsi_blocked = 1;
                scsi_block_requests(ioa_cfg->host);
+       }
 
        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
        ioa_cfg->reset_cmd = ipr_cmd;
@@ -9306,9 +9312,8 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
                        wake_up_all(&ioa_cfg->reset_wait_q);
 
                        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
-                               spin_unlock_irq(ioa_cfg->host->host_lock);
-                               scsi_unblock_requests(ioa_cfg->host);
-                               spin_lock_irq(ioa_cfg->host->host_lock);
+                               ioa_cfg->scsi_unblock = 1;
+                               schedule_work(&ioa_cfg->work_q);
                        }
                        return;
                } else {
index e98a87a653357b54996326d6f422d2ffa7923bee..c7f0e9e3cd7d4986c1341c3b7e115e65de551723 100644 (file)
@@ -1488,6 +1488,8 @@ struct ipr_ioa_cfg {
        u8 cfg_locked:1;
        u8 clear_isr:1;
        u8 probe_done:1;
+       u8 scsi_unblock:1;
+       u8 scsi_blocked:1;
 
        u8 revid;
 
index 4ed48ed38e79316f02ca1e299e56f66eea84ba8e..7ee1a94c0b33eefd57a6889df66649477ad4713b 100644 (file)
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                atomic_read(&tgtp->xmt_ls_rsp_error));
 
                len += snprintf(buf+len, PAGE_SIZE-len,
-                               "FCP: Rcv %08x Release %08x Drop %08x\n",
+                               "FCP: Rcv %08x Defer %08x Release %08x "
+                               "Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->rcv_fcp_cmd_defer),
                                atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
index 5cc8b0f7d885fb0dfd5a00342f6ca72c72a40129..744f3f395b64852a294a9300adb64a496087aed7 100644 (file)
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&tgtp->xmt_ls_rsp_error));
 
                len += snprintf(buf + len, size - len,
-                               "FCP: Rcv %08x Drop %08x\n",
+                               "FCP: Rcv %08x Defer %08x Release %08x "
+                               "Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->rcv_fcp_cmd_defer),
+                               atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
                if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
index fbeec344c6cc3be0bdd878db6bafda7353dcf901..bbbd0f84160d36563008a212afd8252f86ef15c8 100644 (file)
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 }
 
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+                    struct nvmefc_tgt_fcp_req *rsp)
+{
+       struct lpfc_nvmet_tgtport *tgtp;
+       struct lpfc_nvmet_rcv_ctx *ctxp =
+               container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+       struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+       struct lpfc_hba *phba = ctxp->phba;
+
+       lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+                        ctxp->oxid, ctxp->size, smp_processor_id());
+
+       tgtp = phba->targetport->private;
+       atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+       lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+       .defer_rcv      = lpfc_nvmet_defer_rcv,
 
        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                return;
        }
 
+       /* Processing of FCP command is deferred */
+       if (rc == -EOVERFLOW) {
+               lpfc_nvmeio_data(phba,
+                                "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+                                oxid, size, sid);
+               /* defer reposting rcv buffer till .defer_rcv callback */
+               ctxp->rqb_buffer = nvmebuf;
+               atomic_inc(&tgtp->rcv_fcp_cmd_out);
+               return;
+       }
+
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
index e675ef17be08a0f67dd76f9d33a2ab831f961ce8..48a76788b003cb746afa45376272af362c4446ce 100644 (file)
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
        atomic_t rcv_fcp_cmd_in;
        atomic_t rcv_fcp_cmd_out;
        atomic_t rcv_fcp_cmd_drop;
+       atomic_t rcv_fcp_cmd_defer;
        atomic_t xmt_fcp_release;
 
        /* Stats counters - lpfc_nvmet_xmt_fcp_op */
index 33142610882f4d9bedce93076160180513ca5f82..b18646d6057f476e70d59137a0450e5f90d4b3fc 100644 (file)
@@ -401,9 +401,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
                for (i = 0; i < vha->hw->max_req_queues; i++) {
                        struct req_que *req = vha->hw->req_q_map[i];
 
-                       if (!test_bit(i, vha->hw->req_qid_map))
-                               continue;
-
                        if (req || !buf) {
                                length = req ?
                                    req->length : REQUEST_ENTRY_CNT_24XX;
@@ -418,9 +415,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
                for (i = 0; i < vha->hw->max_rsp_queues; i++) {
                        struct rsp_que *rsp = vha->hw->rsp_q_map[i];
 
-                       if (!test_bit(i, vha->hw->rsp_qid_map))
-                               continue;
-
                        if (rsp || !buf) {
                                length = rsp ?
                                    rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -660,9 +654,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
                for (i = 0; i < vha->hw->max_req_queues; i++) {
                        struct req_que *req = vha->hw->req_q_map[i];
 
-                       if (!test_bit(i, vha->hw->req_qid_map))
-                               continue;
-
                        if (req || !buf) {
                                qla27xx_insert16(i, buf, len);
                                qla27xx_insert16(1, buf, len);
@@ -675,9 +666,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
                for (i = 0; i < vha->hw->max_rsp_queues; i++) {
                        struct rsp_que *rsp = vha->hw->rsp_q_map[i];
 
-                       if (!test_bit(i, vha->hw->rsp_qid_map))
-                               continue;
-
                        if (rsp || !buf) {
                                qla27xx_insert16(i, buf, len);
                                qla27xx_insert16(1, buf, len);
index b20da0d27ad78494bc9a0172d428dc176cbcb852..3f82ea1b72dc8739283992b4f425d77692b51e7b 100644 (file)
@@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-       unsigned long flags;
 
        /*
         * Ensure that the complete FCP WRITE payload has been received.
@@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
         */
        cmd->cmd_in_wq = 0;
 
-       spin_lock_irqsave(&cmd->cmd_lock, flags);
-       cmd->data_work = 1;
-       if (cmd->aborted) {
-               cmd->data_work_free = 1;
-               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
-               tcm_qla2xxx_free_cmd(cmd);
-               return;
-       }
-       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
        cmd->qpair->tgt_counters.qla_core_ret_ctio++;
        if (!cmd->write_data_transferred) {
                /*
@@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
        qlt_xmit_tm_rsp(mcmd);
 }
 
-#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)
 static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-       unsigned long flags;
 
        if (qlt_abort_cmd(cmd))
                return;
-
-       spin_lock_irqsave(&cmd->cmd_lock, flags);
-       if ((cmd->state == QLA_TGT_STATE_NEW)||
-           ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
-               DATA_WORK_NOT_FREE(cmd))) {
-               cmd->data_work_free = 1;
-               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-               /*
-                * cmd has not reached fw, Use this trigger to free it.
-                */
-               tcm_qla2xxx_free_cmd(cmd);
-               return;
-       }
-       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-       return;
-
 }
 
 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
index f1cdf32d7514120b31515ddf2901960d4f766694..8927f9f54ad926f5b50439e23685a35b24c50edf 100644 (file)
@@ -99,7 +99,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
 
        ret =  scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
                                NULL, SES_TIMEOUT, SES_RETRIES, NULL);
-       if (unlikely(!ret))
+       if (unlikely(ret))
                return ret;
 
        recv_page_code = ((unsigned char *)buf)[0];
index 8e5013d9cad445178a0461edd8eaf9abe4e84c38..94e402ed30f6ae54ecb32160480d554e6e118827 100644 (file)
@@ -4299,11 +4299,11 @@ static int st_probe(struct device *dev)
        kref_init(&tpnt->kref);
        tpnt->disk = disk;
        disk->private_data = &tpnt->driver;
-       disk->queue = SDp->request_queue;
        /* SCSI tape doesn't register this gendisk via add_disk().  Manually
         * take queue reference that release_disk() expects. */
-       if (!blk_get_queue(disk->queue))
+       if (!blk_get_queue(SDp->request_queue))
                goto out_put_disk;
+       disk->queue = SDp->request_queue;
        tpnt->driver = &st_template;
 
        tpnt->device = SDp;
index 3039072911a5bce09d375c04b9faf65071894c11..afc7ecc3c1876158d33e45933458fc035593c39f 100644 (file)
@@ -200,16 +200,11 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev)
 
        domain->dev = &pdev->dev;
 
-       ret = pm_genpd_init(&domain->genpd, NULL, true);
-       if (ret) {
-               dev_err(domain->dev, "Failed to init power domain\n");
-               return ret;
-       }
-
        domain->regulator = devm_regulator_get_optional(domain->dev, "power");
        if (IS_ERR(domain->regulator)) {
                if (PTR_ERR(domain->regulator) != -ENODEV) {
-                       dev_err(domain->dev, "Failed to get domain's regulator\n");
+                       if (PTR_ERR(domain->regulator) != -EPROBE_DEFER)
+                               dev_err(domain->dev, "Failed to get domain's regulator\n");
                        return PTR_ERR(domain->regulator);
                }
        } else {
@@ -217,6 +212,12 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev)
                                      domain->voltage, domain->voltage);
        }
 
+       ret = pm_genpd_init(&domain->genpd, NULL, true);
+       if (ret) {
+               dev_err(domain->dev, "Failed to init power domain\n");
+               return ret;
+       }
+
        ret = of_genpd_add_provider_simple(domain->dev->of_node,
                                           &domain->genpd);
        if (ret) {
index b0b283810e72245cb24d0f5ab1efc468d55e52c0..de31b9389e2ee710ed48677bcddbe64e88a26284 100644 (file)
@@ -176,6 +176,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
 
        ti_sci_pd->dev = dev;
 
+       ti_sci_pd->pd.name = "ti_sci_pd";
+
        ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev;
        ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev;
 
index ca11be21f64b606091d5e8e0e62044ae263dc3ab..34ca7823255d692d05aa753e7c5bc28a385c5583 100644 (file)
@@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
                        continue;
                }
 
+               set_current_state(TASK_RUNNING);
                wp = async->buf_write_ptr;
                n1 = min(n, async->prealloc_bufsz - wp);
                n2 = n - n1;
@@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
                        }
                        continue;
                }
+
+               set_current_state(TASK_RUNNING);
                rp = async->buf_read_ptr;
                n1 = min(n, async->prealloc_bufsz - rp);
                n2 = n - n1;
index a6a8393d66645e75c13ecf8f67ca9c71ef48a6e5..3e00df74b18c883d4e19f0bac7ec087988b58821 100644 (file)
@@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
                             long m)
 {
        struct ad2s1210_state *st = iio_priv(indio_dev);
-       bool negative;
+       u16 negative;
        int ret = 0;
        u16 pos;
        s16 vel;
index e583dd8a418b537eda69a9606fa8a3fc2c6a6207..d4fa41be80f9a1719574af28c8981ef8e8d287ca 100644 (file)
@@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 
        if (!cnp) {
                pr_info("%s stid %d lookup failure\n", __func__, stid);
-               return;
+               goto rel_skb;
        }
 
        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
+rel_skb:
+       __kfree_skb(skb);
 }
 
 static void
@@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 
        if (!cnp) {
                pr_info("%s stid %d lookup failure\n", __func__, stid);
-               return;
+               goto rel_skb;
        }
 
        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
+rel_skb:
+       __kfree_skb(skb);
 }
 
 static void
@@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
        struct tid_info *t = lldi->tids;
 
        csk = lookup_tid(t, tid);
-       if (unlikely(!csk))
+       if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
-       else
+               goto rel_skb;
+       } else {
                cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+       }
 
        cxgbit_put_csk(csk);
+rel_skb:
+       __kfree_skb(skb);
 }
 
 static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
index dda13f1af38e581f746e4528590510c07efb916f..514986b57c2d60ce19c1074f4d19d65dd550be2e 100644 (file)
@@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 
 static void
 cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
-                     unsigned int nents)
+                     unsigned int nents, u32 skip)
 {
        struct skb_seq_state st;
        const u8 *buf;
@@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
                }
 
                consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
-                                                buf_len, consumed);
+                                                buf_len, skip + consumed);
        }
 }
 
@@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
                struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
                u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
 
-               cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+               cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
        }
 
        cmd->write_data_done += pdu_cb->dlen;
@@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
                  cmd->se_cmd.data_length);
 
        if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+               u32 skip = data_offset % PAGE_SIZE;
+
                sg_off = data_offset / PAGE_SIZE;
                sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-               sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+               sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
 
-               cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+               cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
        }
 
 check_payload:
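Without DDP, the copy must honor the intra-page remainder of data_offset; the old code dropped it, starting each copy at offset 0 of the first SG entry and undercounting the entries spanned. The arithmetic, worked standalone (the sample offsets are invented):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long data_offset = 6144;   /* 1.5 pages in */
            unsigned long data_len = 8192;

            unsigned long sg_off = data_offset / PAGE_SIZE;  /* entry 1 */
            unsigned long skip = data_offset % PAGE_SIZE;    /* 2048 bytes */
            unsigned long sg_nents = DIV_ROUND_UP(skip + data_len,
                                                  PAGE_SIZE); /* 3, not 2 */

            printf("entry %lu, skip %lu, spans %lu entries\n",
                   sg_off, skip, sg_nents);
            return 0;
    }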
index 74e4975dd1b1e74d6c39517be102ccdc92614e80..5001261f5d69d759dd25161401510551d35c3da7 100644 (file)
@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
                return 0;
        }
        np->np_thread_state = ISCSI_NP_THREAD_RESET;
+       atomic_inc(&np->np_reset_count);
 
        if (np->np_thread) {
                spin_unlock_bh(&np->np_thread_lock);
@@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
        cmd->data_direction     = DMA_NONE;
+       kfree(cmd->text_in_ptr);
        cmd->text_in_ptr        = NULL;
 
        return 0;
@@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
                return text_length;
 
        if (completed) {
-               hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+               hdr->flags = ISCSI_FLAG_CMD_FINAL;
        } else {
-               hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+               hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
                cmd->read_data_done += text_length;
                if (cmd->targ_xfer_tag == 0xFFFFFFFF)
                        cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
index e9bdc8b86e7d1d71d77cf4388370af6fcf3fded7..dc13afbd4c88dec2390ca8e65b3332d2f78acd73 100644 (file)
@@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        flush_signals(current);
 
        spin_lock_bh(&np->np_thread_lock);
-       if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+       if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
                np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+               spin_unlock_bh(&np->np_thread_lock);
                complete(&np->np_restart_comp);
+               return 1;
        } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
                spin_unlock_bh(&np->np_thread_lock);
                goto exit;
@@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                goto exit;
        } else if (rc < 0) {
                spin_lock_bh(&np->np_thread_lock);
-               if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+               if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+                       np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
                        spin_unlock_bh(&np->np_thread_lock);
                        complete(&np->np_restart_comp);
                        iscsit_put_transport(conn->conn_transport);
index 36913734c6bc58ed326ac793dc19d3d265ec878d..02e8a5d8665837f415ac7b2d78e51067d1ef5b62 100644 (file)
@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
        mutex_lock(&tpg->acl_node_mutex);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
-       list_del(&acl->acl_list);
+       list_del_init(&acl->acl_list);
        mutex_unlock(&tpg->acl_node_mutex);
 
        target_shutdown_sessions(acl);
@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
         * in transport_deregister_session().
         */
        list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
-               list_del(&nacl->acl_list);
+               list_del_init(&nacl->acl_list);
 
                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
index 97fed9a298bdc29a19d184ba49df458e04e4f9f1..836d552b0385e978bc1a0b98c59a3379c262fd61 100644 (file)
@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
        }
 
        mutex_lock(&se_tpg->acl_node_mutex);
-       list_del(&nacl->acl_list);
+       list_del_init(&nacl->acl_list);
        mutex_unlock(&se_tpg->acl_node_mutex);
 
        core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
                        spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 
                        if (se_nacl->dynamic_stop)
-                               list_del(&se_nacl->acl_list);
+                               list_del_init(&se_nacl->acl_list);
                }
                mutex_unlock(&se_tpg->acl_node_mutex);
 
index 80ee130f8253ec44324ac2ba616cfd93beb1ba76..942d094269fba5db66ff7e791dcfaab1c6acec15 100644 (file)
@@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev,
                                        block_remaining);
                        to_offset = get_block_offset_user(udev, dbi,
                                        block_remaining);
-                       offset = DATA_BLOCK_SIZE - block_remaining;
-                       to += offset;
 
                        if (*iov_cnt != 0 &&
                            to_offset == iov_tail(*iov)) {
@@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
                                (*iov)->iov_len = copy_bytes;
                        }
                        if (copy_data) {
-                               memcpy(to, from + sg->length - sg_remaining,
-                                       copy_bytes);
+                               offset = DATA_BLOCK_SIZE - block_remaining;
+                               memcpy(to + offset,
+                                      from + sg->length - sg_remaining,
+                                      copy_bytes);
                                tcmu_flush_dcache_range(to, copy_bytes);
                        }
                        sg_remaining -= copy_bytes;
@@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
                        offset = DATA_BLOCK_SIZE - block_remaining;
-                       from += offset;
                        tcmu_flush_dcache_range(from, copy_bytes);
-                       memcpy(to + sg->length - sg_remaining, from,
+                       memcpy(to + sg->length - sg_remaining, from + offset,
                                        copy_bytes);
 
                        sg_remaining -= copy_bytes;
@@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
        if (udev->dev_config[0])
                snprintf(str + used, size - used, "/%s", udev->dev_config);
 
+       /* If the old string exists, free it */
+       kfree(info->name);
        info->name = str;
 
        return 0;
index 308b6e17c88aace0775b4ed0a5460097559d66ca..fe2f00ceafc5d7e1a3bbafea4224c838dc2158a4 100644 (file)
@@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
        int res;
        enum tb_port_type type;
 
+       /*
+        * Some DROMs list more ports than the controller actually has,
+        * so we skip those but allow the parser to continue.
+        */
+       if (header->index > sw->config.max_port_number) {
+               dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
+               return 0;
+       }
+
        port = &sw->ports[header->index];
        port->disabled = header->port_disabled;
        if (port->disabled)
index 284749fb0f6b96d3d6d667332e569ba0d745e048..1fc80ea87c13c05bcda2a1cb5daf1c3a8d611024 100644 (file)
@@ -793,6 +793,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        struct tty_struct *tty;
        struct path *pts_path;
        struct dentry *dentry;
+       struct vfsmount *mnt;
        int retval;
        int index;
 
@@ -805,7 +806,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        if (retval)
                return retval;
 
-       fsi = devpts_acquire(filp);
+       fsi = devpts_acquire(filp, &mnt);
        if (IS_ERR(fsi)) {
                retval = PTR_ERR(fsi);
                goto out_free_file;
@@ -849,7 +850,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        pts_path = kmalloc(sizeof(struct path), GFP_KERNEL);
        if (!pts_path)
                goto err_release;
-       pts_path->mnt = filp->f_path.mnt;
+       pts_path->mnt = mnt;
        pts_path->dentry = dentry;
        path_get(pts_path);
        tty->link->driver_data = pts_path;
@@ -866,6 +867,7 @@ err_path_put:
        path_put(pts_path);
        kfree(pts_path);
 err_release:
+       mntput(mnt);
        tty_unlock(tty);
        // This will also put-ref the fsi
        tty_release(inode, filp);
@@ -874,6 +876,7 @@ out:
        devpts_kill_index(fsi, index);
 out_put_fsi:
        devpts_release(fsi);
+       mntput(mnt);
 out_free_file:
        tty_free_file(filp);
        return retval;
index b5def356af63b70e3ebc2e23a48da0753e41b47e..1aab3010fbfae76e2c25cb60085f21051a1f24cf 100644 (file)
@@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
                if (up->dl_write)
                        uart->dl_write = up->dl_write;
 
-               if (serial8250_isa_config != NULL)
-                       serial8250_isa_config(0, &uart->port,
-                                       &uart->capabilities);
+               if (uart->port.type != PORT_8250_CIR) {
+                       if (serial8250_isa_config != NULL)
+                               serial8250_isa_config(0, &uart->port,
+                                               &uart->capabilities);
+
+                       ret = uart_add_one_port(&serial8250_reg,
+                                               &uart->port);
+                       if (ret == 0)
+                               ret = uart->port.line;
+               } else {
+                       dev_info(uart->port.dev,
+                               "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
+                               uart->port.iobase,
+                               (unsigned long long)uart->port.mapbase,
+                               uart->port.irq);
 
-               ret = uart_add_one_port(&serial8250_reg, &uart->port);
-               if (ret == 0)
-                       ret = uart->port.line;
+                       ret = 0;
+               }
        }
        mutex_unlock(&serial_mutex);
 
index 8a857bb34fbb26c6d60784d3fe7576730a9aa5b3..1888d168a41c87c605962da2605df8ab1c02bd20 100644 (file)
@@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = {
        .fixed_options          = true,
 };
 
-/*
- * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
- * occasionally getting stuck as 1. To avoid the potential for a hang, check
- * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
- * implementations, so only do so if an affected platform is detected in
- * parse_spcr().
- */
-static bool qdf2400_e44_present = false;
-
+#ifdef CONFIG_ACPI_SPCR_TABLE
 static struct vendor_data vendor_qdt_qdf2400_e44 = {
        .reg_offset             = pl011_std_offsets,
        .fr_busy                = UART011_FR_TXFE,
@@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = {
        .always_enabled         = true,
        .fixed_options          = true,
 };
+#endif
 
 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
        [REG_DR] = UART01x_DR,
@@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
        resource_size_t addr;
        int i;
 
-       if (strcmp(name, "qdf2400_e44") == 0) {
-               pr_info_once("UART: Working around QDF2400 SoC erratum 44");
-               qdf2400_e44_present = true;
-       } else if (strcmp(name, "pl011") != 0) {
+       /*
+        * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
+        * have a distinct console name, so make sure we check for that.
+        * The actual workaround for the erratum is applied in the probe
+        * function.
+        */
+       if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
                return -ENODEV;
-       }
 
        if (uart_parse_earlycon(options, &iotype, &addr, &options))
                return -ENODEV;
@@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
        }
        uap->port.irq   = ret;
 
-       uap->reg_offset = vendor_sbsa.reg_offset;
-       uap->vendor     = qdf2400_e44_present ?
-                                       &vendor_qdt_qdf2400_e44 : &vendor_sbsa;
+#ifdef CONFIG_ACPI_SPCR_TABLE
+       if (qdf2400_e44_present) {
+               dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
+               uap->vendor = &vendor_qdt_qdf2400_e44;
+       } else
+#endif
+               uap->vendor = &vendor_sbsa;
+
+       uap->reg_offset = uap->vendor->reg_offset;
        uap->fifosize   = 32;
-       uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
+       uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
        uap->port.ops   = &sbsa_uart_pops;
        uap->fixed_baud = baudrate;
 
index ab1bb3b538ac6175dd1fbe5babb686a6a8f4bc49..7f277b092b5bf070c21d9c321d15b2e0d630792e 100644 (file)
@@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
        /* No more submits can occur */
        spin_lock_irq(&hcd_urb_list_lock);
 rescan:
-       list_for_each_entry (urb, &ep->urb_list, urb_list) {
+       list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
                int     is_in;
 
                if (urb->unlinked)
@@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd)
        }
        if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
                hcd = hcd->shared_hcd;
+               clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+               set_bit(HCD_FLAG_DEAD, &hcd->flags);
                if (hcd->rh_registered) {
                        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
 
index 6e6797d145dd80136c413129641bdbbae2f1e63b..822f8c50e4233c70d159a4e374ad66b49502c0c1 100644 (file)
@@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub)
 static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
                u16 portchange)
 {
-       int status, i;
+       int status = -ENODEV;
+       int i;
        unsigned unit_load;
        struct usb_device *hdev = hub->hdev;
        struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4929,9 +4930,10 @@ loop:
 
 done:
        hub_port_disable(hub, port1, 1);
-       if (hcd->driver->relinquish_port && !hub->hdev->parent)
-               hcd->driver->relinquish_port(hcd, port1);
-
+       if (hcd->driver->relinquish_port && !hub->hdev->parent) {
+               if (status != -ENOTCONN && status != -ENODEV)
+                       hcd->driver->relinquish_port(hcd, port1);
+       }
 }
 
 /* Handle physical or logical connection change events.
index 3116edfcdc18558aa768d248f1ff1881448ced09..574da2b4529cc26cc0a97ee7146e42889ca55bbe 100644 (file)
@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+       { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Avision AV600U */
        { USB_DEVICE(0x0638, 0x0a13), .driver_info =
          USB_QUIRK_STRING_FETCH_255 },
@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
        { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
        { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
        { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
+       { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
 
        /* Logitech Optical Mouse M90/M100 */
        { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
index 6b299c7b765611e0cb6c0d30e5de50f8ef279cb7..f064f1549333dcd7dab10fd491e43cf31bac887e 100644 (file)
@@ -896,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
                if (!node) {
                        trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
 
+                       /*
+                        * USB Specification 2.0 Section 5.9.2 states that: "If
+                        * there is only a single transaction in the microframe,
+                        * only a DATA0 data packet PID is used.  If there are
+                        * two transactions per microframe, DATA1 is used for
+                        * the first transaction data packet and DATA0 is used
+                        * for the second transaction data packet.  If there are
+                        * three transactions per microframe, DATA2 is used for
+                        * the first transaction data packet, DATA1 is used for
+                        * the second, and DATA0 is used for the third."
+                        *
+                        * IOW, we should satisfy the following cases:
+                        *
+                        * 1) length <= maxpacket
+                        *      - DATA0
+                        *
+                        * 2) maxpacket < length <= (2 * maxpacket)
+                        *      - DATA1, DATA0
+                        *
+                        * 3) (2 * maxpacket) < length <= (3 * maxpacket)
+                        *      - DATA2, DATA1, DATA0
+                        */
                        if (speed == USB_SPEED_HIGH) {
                                struct usb_ep *ep = &dep->endpoint;
-                               trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+                               unsigned int mult = ep->mult - 1;
+                               unsigned int maxp = usb_endpoint_maxp(ep->desc);
+
+                               if (length <= (2 * maxp))
+                                       mult--;
+
+                               if (length <= maxp)
+                                       mult--;
+
+                               trb->size |= DWC3_TRB_SIZE_PCM1(mult);
                        }
                } else {
                        trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
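
To see the three cases from the comment above play out, here is a standalone sketch of the mult arithmetic the hunk adds (the function name and sample values are illustrative, and it assumes ep_mult is large enough to cover the packet count, as it is for a well-formed high-speed isoc endpoint):

#include <stdio.h>

static unsigned int hs_isoc_pcm1(unsigned int length, unsigned int maxp,
				 unsigned int ep_mult)
{
	unsigned int mult = ep_mult - 1;	/* start from the endpoint's mult */

	if (length <= (2 * maxp))	/* two packets or fewer */
		mult--;
	if (length <= maxp)		/* a single packet */
		mult--;
	return mult;
}

int main(void)
{
	/* maxp = 1024, endpoint mult = 3 (three transactions per microframe) */
	printf("%u\n", hs_isoc_pcm1(512, 1024, 3));	/* 0 -> DATA0 */
	printf("%u\n", hs_isoc_pcm1(1500, 1024, 3));	/* 1 -> DATA1, DATA0 */
	printf("%u\n", hs_isoc_pcm1(2500, 1024, 3));	/* 2 -> DATA2, DATA1, DATA0 */
	return 0;
}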
index 62dc9c7798e78a034a9e63f8c3163fcebec9e57a..e1de8fe599a35695eceda22d3138492af8995b20 100644 (file)
@@ -838,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
        return usb3_req;
 }
 
-static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
-                             struct renesas_usb3_request *usb3_req, int status)
+static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+                               struct renesas_usb3_request *usb3_req,
+                               int status)
 {
        struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
-       unsigned long flags;
 
        dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
                usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
                status);
        usb3_req->req.status = status;
-       spin_lock_irqsave(&usb3->lock, flags);
        usb3_ep->started = false;
        list_del_init(&usb3_req->queue);
-       spin_unlock_irqrestore(&usb3->lock, flags);
+       spin_unlock(&usb3->lock);
        usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
+       spin_lock(&usb3->lock);
+}
+
+static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+                             struct renesas_usb3_request *usb3_req, int status)
+{
+       struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+       unsigned long flags;
+
+       spin_lock_irqsave(&usb3->lock, flags);
+       __usb3_request_done(usb3_ep, usb3_req, status);
+       spin_unlock_irqrestore(&usb3->lock, flags);
 }
 
 static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
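
The split above exists so the gadget giveback runs without the driver lock held, since the completion callback may re-enter the driver. The shape of that pattern, modeled with pthreads (names are illustrative, not the driver's):

#include <pthread.h>

struct req {
	void (*complete)(struct req *);
};

struct ctx {
	pthread_mutex_t lock;
};

/* Caller holds ctx->lock. The completion callback may take the lock
 * itself, so drop it around the call and reacquire it afterwards. */
static void request_done_locked(struct ctx *c, struct req *r)
{
	pthread_mutex_unlock(&c->lock);
	r->complete(r);			/* runs with no driver lock held */
	pthread_mutex_lock(&c->lock);	/* restore the caller's locking state */
}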
index c8989c62a2621b88cf8b9d0c3001a37a31d5e151..c8f38649f749311a81ff63aa194c1b97cb71718f 100644 (file)
@@ -98,6 +98,7 @@ enum amd_chipset_gen {
        AMD_CHIPSET_HUDSON2,
        AMD_CHIPSET_BOLTON,
        AMD_CHIPSET_YANGTZE,
+       AMD_CHIPSET_TAISHAN,
        AMD_CHIPSET_UNKNOWN,
 };
 
@@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
                        pinfo->sb_type.gen = AMD_CHIPSET_SB700;
                else if (rev >= 0x40 && rev <= 0x4f)
                        pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+       }
+       pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+                                         0x145c, NULL);
+       if (pinfo->smbus_dev) {
+               pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
        } else {
                pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
                                PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
@@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 {
        /* Make sure amd chipset type has already been initialized */
        usb_amd_find_chipset_info();
-       if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
-               return 0;
-
-       dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
-       return 1;
+       if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+           amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
+               dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+               return 1;
+       }
+       return 0;
 }
 EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 
@@ -1150,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
                        PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
+
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
+{
+       /*
+        * Our dear uPD72020{1,2} friend only partially resets when
+        * asked to via the XHCI interface, and may end up doing DMA
+        * at the wrong addresses, as it keeps the top 32 bits of some
+        * addresses from its previous programming under obscure
+        * circumstances.
+        * Give it a good whack at probe time. Unfortunately, this
+        * needs to happen before we've had a chance to discover any
+        * quirk, or the system will be in a rather bad state.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+           (pdev->device == 0x0014 || pdev->device == 0x0015))
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
index 6559944801987728a1db6ba31f09db51b92362e3..5582cbafecd4c1a3ddc5443d6cc9182a9b9bc89f 100644 (file)
@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 void sb800_prefetch(struct device *dev, int on);
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
 #else
 struct pci_dev;
 static inline void usb_amd_quirk_pll_disable(void) {}
index 5b0fa553c8bc940e88a6db731cf6dfeb0c9fb971..8071c8fdd15e741b008af64075cda3c87072bfb4 100644 (file)
@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
        driver = (struct hc_driver *)id->driver_data;
 
+       /* For some HW implementations, an XHCI reset is just not enough... */
+       if (usb_xhci_needs_pci_reset(dev)) {
+               dev_info(&dev->dev, "Resetting\n");
+               if (pci_reset_function_locked(dev))
+                       dev_warn(&dev->dev, "Reset failed");
+       }
+
        /* Prevent runtime suspending between USB-2 and USB-3 initialization */
        pm_runtime_get_noresume(&dev->dev);
 
index 76decb8011ebc2d37d3e7e64a394bd2bb6e5c8ba..3344ffd5bb13743812bab737f52b8e81eb77b9db 100644 (file)
@@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
                                "Could not flush host TX%d fifo: csr: %04x\n",
                                ep->epnum, csr))
                        return;
+               mdelay(1);
        }
 }
 
index 8fb86a5f458e01275186dc83ff86e38b38fff35b..3d0dd2f9741571423522ee80c972d8db2ffec853 100644 (file)
@@ -197,6 +197,7 @@ struct msm_otg {
        struct regulator *v3p3;
        struct regulator *v1p8;
        struct regulator *vddcx;
+       struct regulator_bulk_data supplies[3];
 
        struct reset_control *phy_rst;
        struct reset_control *link_rst;
@@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
 
 static int msm_otg_probe(struct platform_device *pdev)
 {
-       struct regulator_bulk_data regs[3];
        int ret = 0;
        struct device_node *np = pdev->dev.of_node;
        struct msm_otg_platform_data *pdata;
@@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev)
                return motg->irq;
        }
 
-       regs[0].supply = "vddcx";
-       regs[1].supply = "v3p3";
-       regs[2].supply = "v1p8";
+       motg->supplies[0].supply = "vddcx";
+       motg->supplies[1].supply = "v3p3";
+       motg->supplies[2].supply = "v1p8";
 
-       ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
+       ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
+                                     motg->supplies);
        if (ret)
                return ret;
 
-       motg->vddcx = regs[0].consumer;
-       motg->v3p3  = regs[1].consumer;
-       motg->v1p8  = regs[2].consumer;
+       motg->vddcx = motg->supplies[0].consumer;
+       motg->v3p3  = motg->supplies[1].consumer;
+       motg->v1p8  = motg->supplies[2].consumer;
 
        clk_set_rate(motg->clk, 60000000);
 
index 93fba9033b00a7136b85a9995d7da32a140c61e9..2c8161bcf5b5e22ee8ed2eb413d29fdfde504e95 100644 (file)
@@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
        struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
        struct usbhs_pipe *pipe;
        unsigned long flags;
-       int ret = 0;
 
        spin_lock_irqsave(&uep->lock, flags);
        pipe = usbhsg_uep_to_pipe(uep);
-       if (!pipe) {
-               ret = -EINVAL;
+       if (!pipe)
                goto out;
-       }
 
        usbhsg_pipe_disable(uep);
        usbhs_pipe_free(pipe);
index d544b331c9f2ce80d83095f30184e2102eda6ea6..02b67abfc2a16139a3230d1120a17a4a9ab1cc09 100644 (file)
 /* Low Power Status register (LPSTS) */
 #define LPSTS_SUSPM    0x4000
 
-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
+/*
+ * USB General control register 2 (UGCTRL2)
+ * Remarks: bit[31:11] and bit[9:6] should be 0
+ */
 #define UGCTRL2_RESERVED_3     0x00000001      /* bit[3:0] should be B'0001 */
 #define UGCTRL2_USB0SEL_OTG    0x00000030
+#define UGCTRL2_VBUSSEL                0x00000400
 
 static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
 {
@@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
 {
        struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
 
-       usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+       usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
+                     UGCTRL2_VBUSSEL);
 
        if (enable) {
                usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
index f64e914a8985495bf6ef51bb3de4772c40ffbf49..2d945c9f975c04d5cd7909e00a37017a97e8062e 100644 (file)
@@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
        { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
        { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+       { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
index ebe51f11105d8889d6144a72bbf719f14e4b1103..fe123153b1a5439f016f2637d4f9d19f0fde3f14 100644 (file)
@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },                   /* D-Link DWM-158 */
        { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),                     /* D-Link DWM-221 B1 */
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),                     /* D-Link DWM-222 */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
index c9ebefd8f35fdbe5491627f4df89b80c87aeabad..a585b477415dde58b38c9e2defe0d26c24552757 100644 (file)
@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
                .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
+       { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
+               .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
        { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
index 09d9be88209e1ce6b1f53dc052a53c5e4c491336..3b5a15d1dc0dd50a0ebf9d925e33ef4cf8db0090 100644 (file)
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID         0x0557
 #define ATEN_VENDOR_ID2                0x0547
 #define ATEN_PRODUCT_ID                0x2008
+#define ATEN_PRODUCT_UC485     0x2021
 #define ATEN_PRODUCT_ID2       0x2118
 
 #define IODATA_VENDOR_ID       0x04bb
index cbea9f329e715aad97b23928d79579cdcf5e8c92..cde115359793001dcf18b8884c3c9e22eec0fa73 100644 (file)
@@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
                "Initio Corporation",
-               "",
+               "INIC-3069",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-               US_FL_NO_ATA_1X),
+               US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
 
 /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
index 06615934fed1cc537694b03d023b613c95aea9f1..0dceb9fa3a0629af3bd1a1990508d2c32bb42db2 100644 (file)
@@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us)
 {
        struct us_data *us = (struct us_data *)__us;
        struct Scsi_Host *host = us_to_host(us);
+       struct scsi_cmnd *srb;
 
        for (;;) {
                usb_stor_dbg(us, "*** thread sleeping\n");
@@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us)
                scsi_lock(host);
 
                /* When we are called with no command pending, we're done */
+               srb = us->srb;
                if (us->srb == NULL) {
                        scsi_unlock(host);
                        mutex_unlock(&us->dev_mutex);
@@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us)
                /* lock access to the state */
                scsi_lock(host);
 
-               /* indicate that the command is done */
-               if (us->srb->result != DID_ABORT << 16) {
-                       usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
-                                    us->srb->result);
-                       us->srb->scsi_done(us->srb);
-               } else {
+               /* was the command aborted? */
+               if (us->srb->result == DID_ABORT << 16) {
 SkipForAbort:
                        usb_stor_dbg(us, "scsi command aborted\n");
+                       srb = NULL;     /* Don't call srb->scsi_done() */
                }
 
                /*
@@ -429,6 +428,13 @@ SkipForAbort:
 
                /* unlock the device pointers */
                mutex_unlock(&us->dev_mutex);
+
+               /* now that the locks are released, notify the SCSI core */
+               if (srb) {
+                       usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
+                                       srb->result);
+                       srb->scsi_done(srb);
+               }
        } /* for (;;) */
 
        /* Wait until we are told to stop */
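
The restructuring above defers the SCSI completion callback until every lock is dropped. A userspace model of that pattern (illustrative names; the real code also handles the SkipForAbort path shown above):

#include <pthread.h>
#include <stddef.h>

struct cmd {
	void (*done)(struct cmd *);
	int result;
};

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

/* Decide under the lock whether the command still needs completing,
 * but only invoke the completion callback once the lock is dropped. */
static void finish_cmd(struct cmd *current_cmd, int aborted)
{
	struct cmd *to_complete;

	pthread_mutex_lock(&host_lock);
	to_complete = aborted ? NULL : current_cmd;	/* abort path completes elsewhere */
	pthread_mutex_unlock(&host_lock);

	if (to_complete)
		to_complete->done(to_complete);		/* no locks held here */
}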
index ff01bed7112f1566ca13b3e17330851bf02fec05..1e784adb89b17534ce31751b546ef20801b2427f 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/efi.h>
 
 static bool request_mem_succeeded = false;
+static bool nowc = false;
 
 static struct fb_var_screeninfo efifb_defined = {
        .activate               = FB_ACTIVATE_NOW,
@@ -99,6 +100,8 @@ static int efifb_setup(char *options)
                                screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
                        else if (!strncmp(this_opt, "width:", 6))
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+                       else if (!strcmp(this_opt, "nowc"))
+                               nowc = true;
                }
        }
 
@@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev)
        info->apertures->ranges[0].base = efifb_fix.smem_start;
        info->apertures->ranges[0].size = size_remap;
 
-       info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+       if (nowc)
+               info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+       else
+               info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
        if (!info->screen_base) {
                pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
                        efifb_fix.smem_len, efifb_fix.smem_start);
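
With the hunks above, the write-combined framebuffer mapping can be disabled at boot. Assuming the usual video= option syntax that efifb_setup() parses, that would look like:

	video=efifb:nowc

which makes efifb_probe() take the plain ioremap() path instead of ioremap_wc().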
index c166e0725be5dab13e9a685a93ea7ea9c23a3351..ba82f97fb42b2d10fdbebd227fcb7e5eb19dcbdc 100644 (file)
@@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev)
        imxfb_disable_controller(fbi);
 
        unregister_framebuffer(info);
-
+       fb_dealloc_cmap(&info->cmap);
        pdata = dev_get_platdata(&pdev->dev);
        if (pdata && pdata->exit)
                pdata->exit(fbi->pdev);
-
-       fb_dealloc_cmap(&info->cmap);
-       kfree(info->pseudo_palette);
-       framebuffer_release(info);
-
        dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
                    fbi->map_dma);
-
        iounmap(fbi->regs);
        release_mem_region(res->start, resource_size(res));
+       kfree(info->pseudo_palette);
+       framebuffer_release(info);
 
        return 0;
 }
index eecf695c16f41b6996520e47b8fa7e1b71efbabc..09e5bb013d28071c69b63b1968ef6eca44252f1e 100644 (file)
@@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = {
 
 static int __init omap_dss_probe(struct platform_device *pdev)
 {
-       struct omap_dss_board_info *pdata = pdev->dev.platform_data;
        int r;
 
        core.pdev = pdev;
index 4da69dbf7dcad7f2e2898dd807a118596f86058a..1bdd02a6d6ac757c5a500db192b3d7923c316753 100644 (file)
@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
        unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
        unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
 
-       return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-               ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
+       return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
 #else
        /*
         * XXX: Add support for merging bio_vec when using different page
index bae1f5d36c26e8eac1a7ce9473a3ca7999b90642..2d43118077e4eecc12f68bee605fb068def11c95 100644 (file)
@@ -574,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data)
 
 static void enable_pirq(struct irq_data *data)
 {
-       startup_pirq(data);
+       enable_dynirq(data);
 }
 
 static void disable_pirq(struct irq_data *data)
index e460802149555b6f0d5def7659d8e8207828eb53..3e59590c7254ddc8f1a08f4232262a74a29e3711 100644 (file)
@@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused)
        struct list_head *ent;
        struct xs_watch_event *event;
 
+       xenwatch_pid = current->pid;
+
        for (;;) {
                wait_event_interruptible(watch_events_waitq,
                                         !list_empty(&watch_events));
@@ -925,7 +927,6 @@ int xs_init(void)
        task = kthread_run(xenwatch_thread, NULL, "xenwatch");
        if (IS_ERR(task))
                return PTR_ERR(task);
-       xenwatch_pid = task->pid;
 
        /* shutdown watches for kexec boot */
        xs_reset_watches();
index 108df2e3602c2c63186b61b2fede52794cb46482..44dfbca9306f04c29081354cc232b8e9b590bb21 100644 (file)
@@ -133,7 +133,7 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
        return sb->s_fs_info;
 }
 
-struct pts_fs_info *devpts_acquire(struct file *filp)
+struct pts_fs_info *devpts_acquire(struct file *filp, struct vfsmount **ptsmnt)
 {
        struct pts_fs_info *result;
        struct path path;
@@ -142,6 +142,7 @@ struct pts_fs_info *devpts_acquire(struct file *filp)
 
        path = filp->f_path;
        path_get(&path);
+       *ptsmnt = NULL;
 
        /* Has the devpts filesystem already been found? */
        sb = path.mnt->mnt_sb;
@@ -165,6 +166,7 @@ struct pts_fs_info *devpts_acquire(struct file *filp)
         * pty code needs to hold extra references in case of last /dev/tty close
         */
        atomic_inc(&sb->s_active);
+       *ptsmnt = mntget(path.mnt);
        result = DEVPTS_SB(sb);
 
 out:
index 3ee4fdc3da9ec359ad847afa36354240329a2da6..ab60051be6e533eb167a72e590494f0a46e3a488 100644 (file)
@@ -46,7 +46,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
 {
        struct fuse_file *ff;
 
-       ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
+       ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
        if (unlikely(!ff))
                return NULL;
 
@@ -609,7 +609,7 @@ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
        struct fuse_io_priv *io = req->io;
        ssize_t pos = -1;
 
-       fuse_release_user_pages(req, !io->write);
+       fuse_release_user_pages(req, io->should_dirty);
 
        if (io->write) {
                if (req->misc.write.in.size != req->misc.write.out.size)
@@ -1316,7 +1316,6 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                       loff_t *ppos, int flags)
 {
        int write = flags & FUSE_DIO_WRITE;
-       bool should_dirty = !write && iter_is_iovec(iter);
        int cuse = flags & FUSE_DIO_CUSE;
        struct file *file = io->file;
        struct inode *inode = file->f_mapping->host;
@@ -1346,6 +1345,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        inode_unlock(inode);
        }
 
+       io->should_dirty = !write && iter_is_iovec(iter);
        while (count) {
                size_t nres;
                fl_owner_t owner = current->files;
@@ -1360,7 +1360,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        nres = fuse_send_read(req, io, pos, nbytes, owner);
 
                if (!io->async)
-                       fuse_release_user_pages(req, should_dirty);
+                       fuse_release_user_pages(req, io->should_dirty);
                if (req->out.h.error) {
                        err = req->out.h.error;
                        break;
@@ -1669,6 +1669,7 @@ err_nofile:
 err_free:
        fuse_request_free(req);
 err:
+       mapping_set_error(page->mapping, error);
        end_page_writeback(page);
        return error;
 }
index 1bd7ffdad593977013c1ddd233b2a91093471471..bd4d2a3e1ec1b8cc0af29bcb8c26f13d82708804 100644 (file)
@@ -249,6 +249,7 @@ struct fuse_io_priv {
        size_t size;
        __u64 offset;
        bool write;
+       bool should_dirty;
        int err;
        struct kiocb *iocb;
        struct file *file;
index 039266128b7ff0b09ed2a55e621a9a0dc9e38720..59cc98ad7577b438a7928cdace1f2af3a7cabf75 100644 (file)
@@ -278,7 +278,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                unsigned long bytes;    /* Bytes to write to page */
 
                offset = (pos & (PAGE_SIZE - 1));
-               bytes = min_t(unsigned long, PAGE_SIZE - offset, length);
+               bytes = min_t(loff_t, PAGE_SIZE - offset, length);
 
                rpage = __iomap_read_page(inode, pos);
                if (IS_ERR(rpage))
@@ -373,7 +373,7 @@ iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
                unsigned offset, bytes;
 
                offset = pos & (PAGE_SIZE - 1); /* Within page */
-               bytes = min_t(unsigned, PAGE_SIZE - offset, count);
+               bytes = min_t(loff_t, PAGE_SIZE - offset, count);
 
                if (IS_DAX(inode))
                        status = iomap_dax_zero(pos, offset, bytes, iomap);
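
Why the min_t() type change above matters: on 32-bit builds, unsigned long is 32 bits, so casting a 64-bit length truncates it before the comparison. A standalone demonstration with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t length = (1LL << 32) + 512;		/* > 4 GiB remaining */
	uint32_t as_ulong32 = (uint32_t)length;		/* 32-bit unsigned long: 512 */

	/* min_t(loff_t, ...): correct, writes a full page this iteration */
	printf("64-bit min: %lld\n", length < 4096 ? (long long)length : 4096);
	/* min_t(unsigned long, ...) on 32-bit: wrongly yields 512 */
	printf("32-bit min: %u\n", as_ulong32 < 4096 ? as_ulong32 : 4096u);
	return 0;
}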
index 69d02cf8cf370678609175d6e5626d0dedf02c3a..5f93cfacb3d14b9befc6ba33a91d79ccde044d89 100644 (file)
@@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT
 config PNFS_BLOCK
        tristate
        depends on NFS_V4_1 && BLK_DEV_DM
+       depends on 64BIT || LBDAF
        default NFS_V4
 
 config PNFS_FLEXFILE_LAYOUT
index 6df7a0cf566015378aa3f76c480115675454297d..f32c58bbe55671cb75abdcb9934152d110e3537d 100644 (file)
@@ -32,6 +32,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
 {
        nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
        nfs4_pnfs_ds_put(mirror_ds->ds);
+       kfree(mirror_ds->ds_versions);
        kfree_rcu(mirror_ds, id_node.rcu);
 }
 
index ffd2e712595d8ac875dc2780b52731543c687ca8..d901326423401c3e7d442f62bfa80d03d281ed02 100644 (file)
@@ -2553,9 +2553,8 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
                clear_bit(NFS_O_RDWR_STATE, &state->flags);
                clear_bit(NFS_OPEN_STATE, &state->flags);
                stateid->type = NFS4_INVALID_STATEID_TYPE;
-       }
-       if (status != NFS_OK)
                return status;
+       }
        if (nfs_open_stateid_recover_openmode(state))
                return -NFS4ERR_OPENMODE;
        return NFS_OK;
index 8a428498d6b21f08c8c26ef184ff9f4332b5cdd0..509a61668d902b84f6756e2ed1bcb22a6d7020a5 100644 (file)
@@ -106,13 +106,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                    global_node_page_state(NR_FILE_MAPPED));
        show_val_kb(m, "Shmem:          ", i.sharedram);
        show_val_kb(m, "Slab:           ",
-                   global_page_state(NR_SLAB_RECLAIMABLE) +
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE) +
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
 
        show_val_kb(m, "SReclaimable:   ",
-                   global_page_state(NR_SLAB_RECLAIMABLE));
+                   global_node_page_state(NR_SLAB_RECLAIMABLE));
        show_val_kb(m, "SUnreclaim:     ",
-                   global_page_state(NR_SLAB_UNRECLAIMABLE));
+                   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
        seq_printf(m, "KernelStack:    %8lu kB\n",
                   global_page_state(NR_KERNEL_STACK_KB));
        show_val_kb(m, "PageTables:     ",
index b836fd61ed878a38d25d5ffe44bb86e30066955c..fe8f3265e8779ac18a5694ef600c024f9e88f281 100644 (file)
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
+#include <linux/uaccess.h>
 
 #include <asm/elf.h>
-#include <linux/uaccess.h>
+#include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include "internal.h"
 
@@ -1008,6 +1009,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
+       struct mmu_gather tlb;
        int itype;
        int rv;
 
@@ -1054,6 +1056,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                }
 
                down_read(&mm->mmap_sem);
+               tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1075,7 +1078,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(mm, 0, -1);
-               flush_tlb_mm(mm);
+               tlb_finish_mmu(&tlb, 0, -1);
                up_read(&mm->mmap_sem);
 out_mm:
                mmput(mm);
index 53a17496c5c536a9410cba73c16d70c5efdbba88..566e6ef99f077c76680cb005417ae995cb4e2878 100644 (file)
@@ -1124,6 +1124,10 @@ void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
                WARN_ON_ONCE(1);
                dquot->dq_dqb.dqb_rsvspace = 0;
        }
+       if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
+           dquot->dq_dqb.dqb_bsoftlimit)
+               dquot->dq_dqb.dqb_btime = (time64_t) 0;
+       clear_bit(DQ_BLKS_B, &dquot->dq_flags);
 }
 
 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
@@ -1145,7 +1149,8 @@ static void dquot_decr_space(struct dquot *dquot, qsize_t number)
                dquot->dq_dqb.dqb_curspace -= number;
        else
                dquot->dq_dqb.dqb_curspace = 0;
-       if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
+       if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
+           dquot->dq_dqb.dqb_bsoftlimit)
                dquot->dq_dqb.dqb_btime = (time64_t) 0;
        clear_bit(DQ_BLKS_B, &dquot->dq_flags);
 }
@@ -1381,14 +1386,18 @@ static int info_idq_free(struct dquot *dquot, qsize_t inodes)
 
 static int info_bdq_free(struct dquot *dquot, qsize_t space)
 {
+       qsize_t tspace;
+
+       tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
+
        if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
-           dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
+           tspace <= dquot->dq_dqb.dqb_bsoftlimit)
                return QUOTA_NL_NOWARN;
 
-       if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
+       if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
                return QUOTA_NL_BSOFTBELOW;
-       if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
-           dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
+       if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
+           tspace - space < dquot->dq_dqb.dqb_bhardlimit)
                return QUOTA_NL_BHARDBELOW;
        return QUOTA_NL_NOWARN;
 }
@@ -2681,7 +2690,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 
        if (check_blim) {
                if (!dm->dqb_bsoftlimit ||
-                   dm->dqb_curspace < dm->dqb_bsoftlimit) {
+                   dm->dqb_curspace + dm->dqb_rsvspace < dm->dqb_bsoftlimit) {
                        dm->dqb_btime = 0;
                        clear_bit(DQ_BLKS_B, &dquot->dq_flags);
                } else if (!(di->d_fieldmask & QC_SPC_TIMER))
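
Toy arithmetic showing why the quota hunks above fold reserved space into the soft-limit comparisons (the values are made up):

#include <stdio.h>

int main(void)
{
	unsigned long long curspace = 400, rsvspace = 300, bsoftlimit = 500;

	/* old check: ignores reservations, so the grace timer is cleared */
	printf("old: below soft limit? %d\n", curspace <= bsoftlimit);			/* 1 */
	/* new check: reserved blocks still count against the soft limit */
	printf("new: below soft limit? %d\n", curspace + rsvspace <= bsoftlimit);	/* 0 */
	return 0;
}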
index 06ea26b8c996f3cc7a9d6fd177260f89394fb325..b0d5897bc4e6d0e019c79f65b6d41df1d3b0d050 100644 (file)
@@ -1600,7 +1600,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
                                   uffdio_copy.len);
                mmput(ctx->mm);
        } else {
-               return -ENOSPC;
+               return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
                return -EFAULT;
@@ -1647,7 +1647,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
                                     uffdio_zeropage.range.len);
                mmput(ctx->mm);
        } else {
-               return -ENOSPC;
+               return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
                return -EFAULT;
index ffd5a15d1bb6d0955633d4e1f61f6a84aa79f3f7..abf5beaae907d32b484128f182bee2d1d7a7cd9e 100644 (file)
@@ -1246,13 +1246,13 @@ xfs_dialloc_ag_inobt(
 
                        /* free inodes to the left? */
                        if (useleft && trec.ir_freecount) {
-                               rec = trec;
                                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                                cur = tcur;
 
                                pag->pagl_leftrec = trec.ir_startino;
                                pag->pagl_rightrec = rec.ir_startino;
                                pag->pagl_pagino = pagino;
+                               rec = trec;
                                goto alloc_inode;
                        }
 
index 0053bcf2b10a1de59c459de8c74a8cd1d0269d3b..4ebd0bafc914ce7b7f1e4d35134c9dedb09a23b5 100644 (file)
@@ -749,9 +749,20 @@ xfs_log_mount_finish(
                return 0;
        }
 
+       /*
+        * During the second phase of log recovery, we need iget and
+        * iput to behave like they do for an active filesystem.
+        * xfs_fs_drop_inode needs to be able to prevent the deletion
+        * of inodes before we're done replaying log items on those
+        * inodes.  Turn it off immediately after recovery finishes
+        * so that we don't leak the quota inodes if subsequent mount
+        * activities fail.
+        */
+       mp->m_super->s_flags |= MS_ACTIVE;
        error = xlog_recover_finish(mp->m_log);
        if (!error)
                xfs_log_work_queue(mp);
+       mp->m_super->s_flags &= ~MS_ACTIVE;
 
        return error;
 }
index 40d4e8b4e193b41a123903735736248568bf26b9..ea7d4b4e50d0ca3eedee85ffd840542e0bb105db 100644 (file)
@@ -944,15 +944,6 @@ xfs_mountfs(
                }
        }
 
-       /*
-        * During the second phase of log recovery, we need iget and
-        * iput to behave like they do for an active filesystem.
-        * xfs_fs_drop_inode needs to be able to prevent the deletion
-        * of inodes before we're done replaying log items on those
-        * inodes.
-        */
-       mp->m_super->s_flags |= MS_ACTIVE;
-
        /*
         * Finish recovering the file system.  This part needed to be delayed
         * until after the root and real-time bitmap inodes were consistently
@@ -1028,12 +1019,13 @@ xfs_mountfs(
  out_quota:
        xfs_qm_unmount_quotas(mp);
  out_rtunmount:
-       mp->m_super->s_flags &= ~MS_ACTIVE;
        xfs_rtunmount_inodes(mp);
  out_rele_rip:
        IRELE(rip);
        cancel_delayed_work_sync(&mp->m_reclaim_work);
        xfs_reclaim_inodes(mp, SYNC_WAIT);
+       /* Clean out dquots that might be in memory after quotacheck. */
+       xfs_qm_unmount(mp);
  out_log_dealloc:
        mp->m_flags |= XFS_MOUNT_UNMOUNTING;
        xfs_log_mount_cancel(mp);
index 8afa4335e5b2bfd0c42c00e1b1506d4e1f7377ac..faddde44de8c902e6884e64eeb8b22bd0d11b75a 100644 (file)
@@ -112,10 +112,11 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
+void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+       struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
-                                                       unsigned long end);
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                        unsigned long start, unsigned long end, bool force);
 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
                                   int page_size);
 
index c749eef1daa1557910ec81c5f296dc907f5ccab2..27b4b66152637fe38b82711f0fa66d6ce946f7d7 100644 (file)
@@ -1209,6 +1209,7 @@ static inline bool acpi_has_watchdog(void) { return false; }
 #endif
 
 #ifdef CONFIG_ACPI_SPCR_TABLE
+extern bool qdf2400_e44_present;
 int parse_spcr(bool earlycon);
 #else
 static inline int parse_spcr(bool earlycon) { return 0; }
index 277ab9af9ac29a95773ce2201809f7c7613391a1..7883e901f65c826d8cd98bde911352aa72dafcce 100644 (file)
@@ -19,7 +19,7 @@
 
 struct pts_fs_info;
 
-struct pts_fs_info *devpts_acquire(struct file *);
+struct pts_fs_info *devpts_acquire(struct file *, struct vfsmount **ptsmnt);
 void devpts_release(struct pts_fs_info *);
 
 int devpts_new_index(struct pts_fs_info *);
index 497f2b3a5a62c8da6f87107de16519b176cc9f1f..97f1b465d04ff0b1ab33b0c1074a722ca41c1b0a 100644 (file)
@@ -105,6 +105,11 @@ struct st_sensor_fullscale {
        struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
 };
 
+struct st_sensor_sim {
+       u8 addr;
+       u8 value;
+};
+
 /**
  * struct st_sensor_bdu - ST sensor device block data update
  * @addr: address of the register.
@@ -197,6 +202,7 @@ struct st_sensor_transfer_function {
  * @bdu: Block data update register.
  * @das: Data Alignment Selection register.
  * @drdy_irq: Data ready register of the sensor.
+ * @sim: SPI serial interface mode register of the sensor.
  * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
  * @bootime: samples to discard when sensor passing from power-down to power-up.
  */
@@ -213,6 +219,7 @@ struct st_sensor_settings {
        struct st_sensor_bdu bdu;
        struct st_sensor_das das;
        struct st_sensor_data_ready_irq drdy_irq;
+       struct st_sensor_sim sim;
        bool multi_read_bit;
        unsigned int bootime;
 };
index 77d427974f575699d181265a9eb4c8d1cdd4dad5..bae11c7e7bf31920fcf0b928b31dc29ad70f6f8b 100644 (file)
@@ -61,6 +61,7 @@ extern int memblock_debug;
 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
 #define __init_memblock __meminit
 #define __initdata_memblock __meminitdata
+void memblock_discard(void);
 #else
 #define __init_memblock
 #define __initdata_memblock
@@ -74,8 +75,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
                                        int nid, ulong flags);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
@@ -110,6 +109,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
                                phys_addr_t *out_end);
 
+void __memblock_free_early(phys_addr_t base, phys_addr_t size);
+void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+
 /**
  * for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.
index 3914e3dd61680a9bc8814d9b9a386c59611eca81..9b15a4bcfa77dca9c51f88a195a753a018425e53 100644 (file)
@@ -484,7 +484,8 @@ bool mem_cgroup_oom_synchronize(bool wait);
 extern int do_swap_account;
 #endif
 
-void lock_page_memcg(struct page *page);
+struct mem_cgroup *lock_page_memcg(struct page *page);
+void __unlock_page_memcg(struct mem_cgroup *memcg);
 void unlock_page_memcg(struct page *page);
 
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
@@ -809,7 +810,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void lock_page_memcg(struct page *page)
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+{
+       return NULL;
+}
+
+static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
 {
 }
 
index 7f384bb62d8ec6bc7eafa25828b0716be63c7ccb..3cadee0a350889f748e7b1a999b449ae003e9c3f 100644 (file)
@@ -487,14 +487,12 @@ struct mm_struct {
        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
        /*
         * An operation with batched TLB flushing is going on. Anything that
         * can move process memory needs to flush the TLB when moving a
         * PROT_NONE or PROT_NUMA mapped page.
         */
-       bool tlb_flush_pending;
-#endif
+       atomic_t tlb_flush_pending;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
        /* See flush_tlb_batched_pending() */
        bool tlb_flush_batched;
@@ -522,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
        return mm->cpu_vm_mask_var;
 }
 
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                               unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+                               unsigned long start, unsigned long end);
+
 /*
  * Memory barriers to keep this state in sync are graciously provided by
  * the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers ensure that tlb_flush_pending updates, which happen while
+ * the lock is not taken, and the PTE updates, which happen while the lock
+ * is taken, are serialized.
  */
 static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 {
-       barrier();
-       return mm->tlb_flush_pending;
+       return atomic_read(&mm->tlb_flush_pending) > 0;
+}
+
+/*
+ * Returns true if more than one TLB batching thread is operating on the
+ * mm in parallel.
+ */
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+       return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+       atomic_set(&mm->tlb_flush_pending, 0);
 }
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
 {
-       mm->tlb_flush_pending = true;
+       atomic_inc(&mm->tlb_flush_pending);
 
        /*
-        * Guarantee that the tlb_flush_pending store does not leak into the
+        * Guarantee that the tlb_flush_pending increase does not leak into the
         * critical section updating the page tables
         */
        smp_mb__before_spinlock();
 }
+
 /* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
-{
-       barrier();
-       mm->tlb_flush_pending = false;
-}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
-       return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
 {
+       /*
+        * Guarantee that the tlb_flush_pending does not leak into the
+        * critical section, since we must order the PTE change and changes to
+        * the pending TLB flush indication. We could have relied on TLB flush
+        * as a memory barrier, but this behavior is not clearly documented.
+        */
+       smp_mb__before_atomic();
+       atomic_dec(&mm->tlb_flush_pending);
 }
-#endif
 
 struct vm_fault;
 
index dda2cc939a531dab67441c6ddf4b7869c6a06159..ebeb48c920054d705924453f6ed6c6aca0233af4 100644 (file)
@@ -37,7 +37,7 @@ struct net;
 
 /* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
  * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
- * Eventually all flags will be in sk->sk_wq_flags.
+ * Eventually all flags will be in sk->sk_wq->flags.
  */
 #define SOCKWQ_ASYNC_NOSPACE   0
 #define SOCKWQ_ASYNC_WAITDATA  1
index 8aa01fd859fb84e5f64e9d7ffb0c20957afdfd2d..a36abe2da13e1a23edc8aa152205af143f658990 100644 (file)
@@ -168,6 +168,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
 #define sysctl_softlockup_all_cpu_backtrace 0
 #define sysctl_hardlockup_all_cpu_backtrace 0
 #endif
+
+#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
+    defined(CONFIG_HARDLOCKUP_DETECTOR)
+void watchdog_update_hrtimer_threshold(u64 period);
+#else
+static inline void watchdog_update_hrtimer_threshold(u64 period) { }
+#endif
+
 extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
index 6c8c5d8041b72ec01097d1c0563b793ea7449f1f..2591878c1d4804d374d39491c2a3f64d1b43a214 100644 (file)
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
  *       indicating an FC transport Aborted status.
  *       Entrypoint is Mandatory.
  *
+ * @defer_rcv:  Called by the transport to signal the LLDD that it has
+ *       begun processing of a previously received NVME CMD IU. The LLDD
+ *       is now free to re-use the rcv buffer associated with the
+ *       nvmefc_tgt_fcp_req.
+ *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
  *       Value is Mandatory. Must be at least 1.
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
                                struct nvmefc_tgt_fcp_req *fcpreq);
        void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
                                struct nvmefc_tgt_fcp_req *fcpreq);
+       void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
+                               struct nvmefc_tgt_fcp_req *fcpreq);
 
        u32     max_hw_queues;
        u16     max_sgl_segments;
index 8a266e2be5a63a29a2de38ff76f0bdc2e4fac08e..76aac4ce39bcf59e2e5eeb38053cf05f0c9d05ae 100644 (file)
@@ -6,6 +6,8 @@
 #include <linux/types.h>
 #include <linux/nodemask.h>
 #include <uapi/linux/oom.h>
+#include <linux/sched/coredump.h> /* MMF_* */
+#include <linux/mm.h> /* VM_FAULT* */
 
 struct zonelist;
 struct notifier_block;
@@ -63,6 +65,26 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
        return tsk->signal->oom_mm;
 }
 
+/*
+ * Checks whether a page fault on the given mm is still reliable.
+ * This is no longer true if the oom reaper started to reap the
+ * address space which is reflected by MMF_UNSTABLE flag set in
+ * the mm. At that moment any !shared mapping would lose its content
+ * and could cause memory corruption (zero pages instead of the
+ * original content).
+ *
+ * Users should call this before establishing a page table entry for
+ * a !shared mapping and under the proper page table lock.
+ *
+ * Return 0 when the page fault is safe, VM_FAULT_SIGBUS otherwise.
+ */
+static inline int check_stable_address_space(struct mm_struct *mm)
+{
+       if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+               return VM_FAULT_SIGBUS;
+       return 0;
+}
+
 extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);
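
A hedged sketch of the intended call pattern, matching the do_anonymous_page()
hunk later in this series: take the page table lock, verify the address space
is still stable, and only then install the entry (vmf, mm, entry and ret are
assumed from the fault handler context):

	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
	ret = check_stable_address_space(mm);
	if (ret)			/* oom reaper already ran: SIGBUS */
		goto unlock;
	set_pte_at(mm, vmf->address, vmf->pte, entry);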
index 4869e66dd659a6bc8fe4ad90df2ed9d3ff98ccac..f958d0732af685761c8b996cc6c79ca87f99f43f 100644 (file)
@@ -188,6 +188,8 @@ enum pci_dev_flags {
         * the direct_complete optimization.
         */
        PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
+       /* Don't use Relaxed Ordering for TLPs directed at this device */
+       PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12),
 };
 
 enum pci_irq_reroute_variant {
@@ -1067,6 +1069,7 @@ void pcie_flr(struct pci_dev *dev);
 int __pci_reset_function(struct pci_dev *dev);
 int __pci_reset_function_locked(struct pci_dev *dev);
 int pci_reset_function(struct pci_dev *dev);
+int pci_reset_function_locked(struct pci_dev *dev);
 int pci_try_reset_function(struct pci_dev *dev);
 int pci_probe_reset_slot(struct pci_slot *slot);
 int pci_reset_slot(struct pci_slot *slot);
@@ -1125,6 +1128,7 @@ bool pci_check_pme_status(struct pci_dev *dev);
 void pci_pme_wakeup_bus(struct pci_bus *bus);
 void pci_d3cold_enable(struct pci_dev *dev);
 void pci_d3cold_disable(struct pci_dev *dev);
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
 
 /* PCI Virtual Channel */
 int pci_save_vc_state(struct pci_dev *dev);
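
A sketch of how a device driver might consume the new helper together with the
quirk flag above; the driver-private field is hypothetical:

	/* Only emit Relaxed Ordering TLPs if the whole path tolerates them. */
	priv->use_relaxed_ordering = pcie_relaxed_ordering_enabled(pdev);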
index a3b873fc59e41745c78bc31a70f0c71d7e4374c3..b14095bcf4bb40522b66716d4c1e7b59ebf72eb4 100644 (file)
@@ -310,8 +310,8 @@ struct pmu {
         * Notification that the event was mapped or unmapped.  Called
         * in the context of the mapping task.
         */
-       void (*event_mapped)            (struct perf_event *event); /*optional*/
-       void (*event_unmapped)          (struct perf_event *event); /*optional*/
+       void (*event_mapped)            (struct perf_event *event, struct mm_struct *mm); /* optional */
+       void (*event_unmapped)          (struct perf_event *event, struct mm_struct *mm); /* optional */
 
        /*
         * Flags for ->add()/->del()/ ->start()/->stop(). There are
index 79b0e4cdb8141a10e73affb9a837d5bcc5f00a2a..f8274b0c68880ccbd02de1a85ece46a3cab2053f 100644 (file)
  *     Available only for accelerometer and pressure sensors.
  *     Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
  * @open_drain: set the interrupt line to be open drain if possible.
+ * @spi_3wire: enable spi-3wire mode.
  */
 struct st_sensors_platform_data {
        u8 drdy_int_pin;
        bool open_drain;
+       bool spi_3wire;
 };
 
 #endif /* ST_SENSORS_PDATA_H */
index 5726107963b2d9930d61c3cf5c486abfe244c18a..0ad87c434ae6a344984837e8dd053fe7705d21f1 100644 (file)
@@ -43,12 +43,13 @@ struct sync_file {
 #endif
 
        wait_queue_head_t       wq;
+       unsigned long           flags;
 
        struct dma_fence        *fence;
        struct dma_fence_cb cb;
 };
 
-#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
 
 struct sync_file *sync_file_create(struct dma_fence *fence);
 struct dma_fence *sync_file_get_fence(int fd);
index 5b74e36c0ca896481acd04bbea92be002ad4ba64..dc19880c02f5eb9091d185c567b961754cfa46e2 100644 (file)
@@ -757,6 +757,43 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
        __ret;                                                                  \
 })
 
+#define __wait_event_killable_timeout(wq_head, condition, timeout)             \
+       ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
+                     TASK_KILLABLE, 0, timeout,                                \
+                     __ret = schedule_timeout(__ret))
+
+/**
+ * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a kill signal is received.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
+ * interrupted by a kill signal.
+ *
+ * Only kill signals interrupt this process.
+ */
+#define wait_event_killable_timeout(wq_head, condition, timeout)               \
+({                                                                             \
+       long __ret = timeout;                                                   \
+       might_sleep();                                                          \
+       if (!___wait_cond_timeout(condition))                                   \
+               __ret = __wait_event_killable_timeout(wq_head,                  \
+                                               condition, timeout);            \
+       __ret;                                                                  \
+})
+
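
Usage mirrors the kernel/kmod.c hunk later in this series: sleep until a slot
frees up, but give up after a bounded time or when a fatal signal arrives:

	ret = wait_event_killable_timeout(kmod_wq,
					  atomic_dec_if_positive(&kmod_concurrent_max) >= 0,
					  MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
	if (!ret)			/* timed out, condition still false */
		return -ETIME;
	else if (ret == -ERESTARTSYS)	/* SIGKILL received */
		return ret;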
 
 #define __wait_event_lock_irq(wq_head, condition, lock, cmd)                   \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
index 6df79e96a780b3bf0c141fee5b43eded5276775c..f44ff2476758e27ad9f9cc20d96d96f16e5ee98c 100644 (file)
@@ -336,6 +336,16 @@ static inline void in6_dev_put(struct inet6_dev *idev)
                in6_dev_finish_destroy(idev);
 }
 
+static inline void in6_dev_put_clear(struct inet6_dev **pidev)
+{
+       struct inet6_dev *idev = *pidev;
+
+       if (idev) {
+               in6_dev_put(idev);
+               *pidev = NULL;
+       }
+}
+
 static inline void __in6_dev_put(struct inet6_dev *idev)
 {
        refcount_dec(&idev->refcnt);
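
The helper drops a cached reference and clears the pointer in one step, so a
caller cannot accidentally put the same reference twice. A hedged sketch,
assuming a cached idev pointer such as struct rt6_info's rt6i_idev:

	/* Tear down the cached idev reference exactly once. */
	in6_dev_put_clear(&rt->rt6i_idev);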
index b00508d22e0a6adc37ee4e69187377fea3bc102d..b2e68657a2162cd55b957c9230779f5d066fe6f3 100644 (file)
@@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
               BOND_MODE(bond) == BOND_MODE_ALB;
 }
 
+static inline bool bond_needs_speed_duplex(const struct bonding *bond)
+{
+       return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
+}
+
 static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
 {
        return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
index 8ffd434676b7a270af73534f3dbcc26f370fe215..71c72a939bf8b21c5102994ea68796dc89c4f097 100644 (file)
 #include <linux/sched/signal.h>
 #include <net/ip.h>
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-
-struct napi_struct;
-extern unsigned int sysctl_net_busy_read __read_mostly;
-extern unsigned int sysctl_net_busy_poll __read_mostly;
-
 /*             0 - Reserved to indicate value not set
  *     1..NR_CPUS - Reserved for sender_cpu
  *  NR_CPUS+1..~0 - Region available for NAPI IDs
  */
 #define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_busy_read __read_mostly;
+extern unsigned int sysctl_net_busy_poll __read_mostly;
+
 static inline bool net_busy_loop_on(void)
 {
        return sysctl_net_busy_poll;
index b2b5419467cc123b4a2b70c2d926b4576651248d..f8149ca192b430c181406fa9a3e4db8fc0a17252 100644 (file)
@@ -5499,6 +5499,21 @@ static inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
        ieee80211_manage_rx_ba_offl(vif, addr, tid + IEEE80211_NUM_TIDS);
 }
 
+/**
+ * ieee80211_rx_ba_timer_expired - stop a Rx BA session due to timeout
+ *
+ * Some device drivers do not offload AddBa/DelBa negotiation, but handle Rx
+ * buffer reordering internally, and therefore also handle the session timer.
+ *
+ * Trigger the timeout flow, which sends a DelBa.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+                                  const u8 *addr, unsigned int tid);
+
 /* Rate control API */
 
 /**
index cc8036987dcb885012c6c5eda0fb2bed2e588841..586de4b811b5678c72a045a3ede9852164b9b3e2 100644 (file)
@@ -366,12 +366,13 @@ static inline bool udp_skb_is_linear(struct sk_buff *skb)
 static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
                                  struct iov_iter *to)
 {
-       int n, copy = len - off;
+       int n;
 
-       n = copy_to_iter(skb->data + off, copy, to);
-       if (n == copy)
+       n = copy_to_iter(skb->data + off, len, to);
+       if (n == len)
                return 0;
 
+       iov_iter_revert(to, n);
        return -EFAULT;
 }
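
The revert matters because copy_to_iter() advances the iterator even on a
short copy; without rewinding the n bytes already consumed, a caller retrying
or falling back to a slower path would see a misaligned iterator.
Conceptually:

	n = copy_to_iter(skb->data + off, len, to);	/* advances 'to' by n */
	if (n != len) {
		iov_iter_revert(to, n);			/* undo partial progress */
		return -EFAULT;				/* iterator is intact again */
	}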
 
index 0ca1fb08805b254fa8ffda73f4decd4c48ac2211..fb87d32f5e513de3c2a1b7c6f402b5f48e401c45 100644 (file)
@@ -786,6 +786,7 @@ struct iscsi_np {
        int                     np_sock_type;
        enum np_thread_state_table np_thread_state;
        bool                    enabled;
+       atomic_t                np_reset_count;
        enum iscsi_timer_flags_table np_login_timer_flags;
        u32                     np_exports;
        enum np_flags_table     np_flags;
index 26c54f6d595d4070c7708ef22daf7533468404a2..ad4eb2863e70ee195f9abc68e6b8c3c3020f27bc 100644 (file)
@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
        __u32 size;           /* in, cmdstream size */
        __u32 pad;
        __u32 nr_relocs;      /* in, number of submit_reloc's */
-       __u64 __user relocs;  /* in, ptr to array of submit_reloc's */
+       __u64 relocs;         /* in, ptr to array of submit_reloc's */
 };
 
 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
        __u32 fence;          /* out */
        __u32 nr_bos;         /* in, number of submit_bo's */
        __u32 nr_cmds;        /* in, number of submit_cmd's */
-       __u64 __user bos;     /* in, ptr to array of submit_bo's */
-       __u64 __user cmds;    /* in, ptr to array of submit_cmd's */
+       __u64 bos;            /* in, ptr to array of submit_bo's */
+       __u64 cmds;           /* in, ptr to array of submit_cmd's */
        __s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
 };
 
index 62d686d965813aeb4b8ef34877e886e08bd71912..9eb8b3511636e96e0607be2484119823b1034fb5 100644 (file)
@@ -66,7 +66,7 @@ static struct fsnotify_group *audit_watch_group;
 
 /* fsnotify events we care about. */
 #define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\
-                       FS_MOVE_SELF | FS_EVENT_ON_CHILD)
+                       FS_MOVE_SELF | FS_EVENT_ON_CHILD | FS_UNMOUNT)
 
 static void audit_free_parent(struct audit_parent *parent)
 {
@@ -457,13 +457,15 @@ void audit_remove_watch_rule(struct audit_krule *krule)
        list_del(&krule->rlist);
 
        if (list_empty(&watch->rules)) {
+               /*
+                * audit_remove_watch() drops our reference to 'parent' which
+                * can get freed. Grab our own reference to be safe.
+                */
+               audit_get_parent(parent);
                audit_remove_watch(watch);
-
-               if (list_empty(&parent->watches)) {
-                       audit_get_parent(parent);
+               if (list_empty(&parent->watches))
                        fsnotify_destroy_mark(&parent->mark, audit_watch_group);
-                       audit_put_parent(parent);
-               }
+               audit_put_parent(parent);
        }
 }
 
index 426c2ffba16d4ce1474a798a4c43d78d5abedb1e..ee20d4c546b5ebc0248c084e11c483e0ef800c6f 100644 (file)
@@ -2217,6 +2217,33 @@ static int group_can_go_on(struct perf_event *event,
        return can_add_hw;
 }
 
+/*
+ * Complement to update_event_times(). This computes the tstamp_* values to
+ * continue 'enabled' state from @now, and effectively discards the time
+ * between the prior tstamp_stopped and now (as we were in the OFF state, or
+ * just switched (context) time base).
+ *
+ * This further assumes '@event->state == INACTIVE' (we just came from OFF)
+ * and that the event cannot have been scheduled in yet. Going into the
+ * INACTIVE state means
+ * '@event->tstamp_stopped = @now'.
+ *
+ * Thus given the rules of update_event_times():
+ *
+ *   total_time_enabled = tstamp_stopped - tstamp_enabled
+ *   total_time_running = tstamp_stopped - tstamp_running
+ *
+ * We can insert 'tstamp_stopped == now' and reverse them to compute new
+ * tstamp_* values.
+ */
+static void __perf_event_enable_time(struct perf_event *event, u64 now)
+{
+       WARN_ON_ONCE(event->state != PERF_EVENT_STATE_INACTIVE);
+
+       event->tstamp_stopped = now;
+       event->tstamp_enabled = now - event->total_time_enabled;
+       event->tstamp_running = now - event->total_time_running;
+}
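
A worked example of the reversal: suppose an event had accumulated
total_time_enabled = 30 and total_time_running = 20 before going OFF, and is
re-enabled at now = 100. Then:

	tstamp_stopped = 100
	tstamp_enabled = 100 - 30 = 70
	tstamp_running = 100 - 20 = 80

so an update_event_times() that later stops the event at now = 150 computes
total_time_enabled = 150 - 70 = 80, i.e. the original 30 plus the 50 spent
enabled since the re-enable, with the OFF gap discarded.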
+
 static void add_event_to_ctx(struct perf_event *event,
                               struct perf_event_context *ctx)
 {
@@ -2224,9 +2251,12 @@ static void add_event_to_ctx(struct perf_event *event,
 
        list_add_event(event, ctx);
        perf_group_attach(event);
-       event->tstamp_enabled = tstamp;
-       event->tstamp_running = tstamp;
-       event->tstamp_stopped = tstamp;
+       /*
+        * We can be called with event->state == STATE_OFF when we create with
+        * .disabled = 1. In that case the IOC_ENABLE will call this function.
+        */
+       if (event->state == PERF_EVENT_STATE_INACTIVE)
+               __perf_event_enable_time(event, tstamp);
 }
 
 static void ctx_sched_out(struct perf_event_context *ctx,
@@ -2471,10 +2501,11 @@ static void __perf_event_mark_enabled(struct perf_event *event)
        u64 tstamp = perf_event_time(event);
 
        event->state = PERF_EVENT_STATE_INACTIVE;
-       event->tstamp_enabled = tstamp - event->total_time_enabled;
+       __perf_event_enable_time(event, tstamp);
        list_for_each_entry(sub, &event->sibling_list, group_entry) {
+               /* XXX should not be > INACTIVE if event isn't */
                if (sub->state >= PERF_EVENT_STATE_INACTIVE)
-                       sub->tstamp_enabled = tstamp - sub->total_time_enabled;
+                       __perf_event_enable_time(sub, tstamp);
        }
 }
 
@@ -5090,7 +5121,7 @@ static void perf_mmap_open(struct vm_area_struct *vma)
                atomic_inc(&event->rb->aux_mmap_count);
 
        if (event->pmu->event_mapped)
-               event->pmu->event_mapped(event);
+               event->pmu->event_mapped(event, vma->vm_mm);
 }
 
 static void perf_pmu_output_stop(struct perf_event *event);
@@ -5113,7 +5144,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        unsigned long size = perf_data_size(rb);
 
        if (event->pmu->event_unmapped)
-               event->pmu->event_unmapped(event);
+               event->pmu->event_unmapped(event, vma->vm_mm);
 
        /*
         * rb->aux_mmap_count will always drop before rb->mmap_count and
@@ -5411,7 +5442,7 @@ aux_unlock:
        vma->vm_ops = &perf_mmap_vmops;
 
        if (event->pmu->event_mapped)
-               event->pmu->event_mapped(event);
+               event->pmu->event_mapped(event, vma->vm_mm);
 
        return ret;
 }
index 17921b0390b4f91113bcf8c9ccac5c1225751460..e075b7780421dee1d8243b9dc178248398c5f189 100644 (file)
@@ -807,7 +807,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        mm_init_aio(mm);
        mm_init_owner(mm, p);
        mmu_notifier_mm_init(mm);
-       clear_tlb_flush_pending(mm);
+       init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        mm->pmd_huge_pte = NULL;
 #endif
index a3cc37c0c85e2267497f650611b656af90aaf5aa..3675c6004f2a68c9d60493c7c46e9c46a0baa73f 100644 (file)
@@ -1000,7 +1000,7 @@ EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
 
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
-       unsigned long flags;
+       unsigned long flags, trigger, tmp;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
        if (!desc)
@@ -1014,6 +1014,8 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 
        irq_settings_clr_and_set(desc, clr, set);
 
+       trigger = irqd_get_trigger_type(&desc->irq_data);
+
        irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
                   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
        if (irq_settings_has_no_balance_set(desc))
@@ -1025,7 +1027,11 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
        if (irq_settings_is_level(desc))
                irqd_set(&desc->irq_data, IRQD_LEVEL);
 
-       irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
+       tmp = irq_settings_get_trigger_mask(desc);
+       if (tmp != IRQ_TYPE_NONE)
+               trigger = tmp;
+
+       irqd_set(&desc->irq_data, trigger);
 
        irq_put_desc_unlock(desc, flags);
 }
index 1a9abc1c8ea0046d6491d1786645f1c1e4e1eee2..259a22aa9934cf9d67eb779e5e3eae22db0a3412 100644 (file)
@@ -165,7 +165,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
        struct irq_data *data = irq_get_irq_data(irq);
        struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
 
-       if (!data || !ipimask || cpu > nr_cpu_ids)
+       if (!data || !ipimask || cpu >= nr_cpu_ids)
                return INVALID_HWIRQ;
 
        if (!cpumask_test_cpu(cpu, ipimask))
@@ -195,7 +195,7 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
        if (!chip->ipi_send_single && !chip->ipi_send_mask)
                return -EINVAL;
 
-       if (cpu > nr_cpu_ids)
+       if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
        if (dest) {
index 6d016c5d97c8390f561e6a9456a4e0c1ca59bffe..2f37acde640b6c6e2ce73933a52dbad65f014cf3 100644 (file)
@@ -70,6 +70,18 @@ static DECLARE_RWSEM(umhelper_sem);
 static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
 static DECLARE_WAIT_QUEUE_HEAD(kmod_wq);
 
+/*
+ * This is a restriction on having *all* MAX_KMOD_CONCURRENT threads
+ * running at the same time without returning. When this happens we
+ * believe you've somehow ended up with a recursive module dependency
+ * creating a loop.
+ *
+ * We have no option but to fail.
+ *
+ * Userspace should proactively try to detect and prevent these.
+ */
+#define MAX_KMOD_ALL_BUSY_TIMEOUT 5
+
 /*
        modprobe_path is set via /proc/sys.
 */
@@ -167,8 +179,17 @@ int __request_module(bool wait, const char *fmt, ...)
                pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
                                    atomic_read(&kmod_concurrent_max),
                                    MAX_KMOD_CONCURRENT, module_name);
-               wait_event_interruptible(kmod_wq,
-                                        atomic_dec_if_positive(&kmod_concurrent_max) >= 0);
+               ret = wait_event_killable_timeout(kmod_wq,
+                                                 atomic_dec_if_positive(&kmod_concurrent_max) >= 0,
+                                                 MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
+               if (!ret) {
+                       pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now",
+                                           module_name, MAX_KMOD_CONCURRENT, MAX_KMOD_ALL_BUSY_TIMEOUT);
+                       return -ETIME;
+               } else if (ret == -ERESTARTSYS) {
+                       pr_warn_ratelimited("request_module: sigkill sent for modprobe %s, giving up", module_name);
+                       return ret;
+               }
        }
 
        trace_module_request(module_name, wait, _RET_IP_);
index 222317721c5a09291c6b78fc839e722b2196b177..0972a8e09d082d99c7f197cbe6bd4fdb6475ba33 100644 (file)
@@ -1650,7 +1650,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
 {
        unsigned long size;
 
-       size = global_page_state(NR_SLAB_RECLAIMABLE)
+       size = global_node_page_state(NR_SLAB_RECLAIMABLE)
                + global_node_page_state(NR_ACTIVE_ANON)
                + global_node_page_state(NR_INACTIVE_ANON)
                + global_node_page_state(NR_ACTIVE_FILE)
index 7e33f8c583e64c91d7cb6ac1fb330b01299b7be2..ed804a470dcd151c18915f956c2f325b6d22bb0f 100644 (file)
@@ -1194,7 +1194,11 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
                        recalc_sigpending_and_wake(t);
                }
        }
-       if (action->sa.sa_handler == SIG_DFL)
+       /*
+        * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
+        * debugging to leave init killable.
+        */
+       if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
index 37385193a6084ed1b8fdd794eb0938c2021fcfa4..dc498b605d5dd36137eaba7bd0ee93da72a36c33 100644 (file)
@@ -204,10 +204,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
                fmt_cnt++;
        }
 
-       return __trace_printk(1/* fake ip will not be printed */, fmt,
-                             mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
-                             mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
-                             mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
+/* Horrid workaround for getting va_list handling working with different
+ * argument type combinations generically for 32 and 64 bit archs.
+ */
+#define __BPF_TP_EMIT()        __BPF_ARG3_TP()
+#define __BPF_TP(...)                                                  \
+       __trace_printk(1 /* Fake ip will not be printed. */,            \
+                      fmt, ##__VA_ARGS__)
+
+#define __BPF_ARG1_TP(...)                                             \
+       ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))        \
+         ? __BPF_TP(arg1, ##__VA_ARGS__)                               \
+         : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))    \
+             ? __BPF_TP((long)arg1, ##__VA_ARGS__)                     \
+             : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
+
+#define __BPF_ARG2_TP(...)                                             \
+       ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))        \
+         ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)                          \
+         : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))    \
+             ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)                \
+             : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
+
+#define __BPF_ARG3_TP(...)                                             \
+       ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))        \
+         ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)                          \
+         : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))    \
+             ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)                \
+             : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
+
+       return __BPF_TP_EMIT();
 }
 
 static const struct bpf_func_proto bpf_trace_printk_proto = {
index 06d3389bca0df01c8a730f7b270767f6ed35a8a9..f5d52024f6b72a9d1354b1a44c12c3e3b6af06a9 100644 (file)
@@ -240,6 +240,7 @@ static void set_sample_period(void)
         * hardlockup detector generates a warning
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
+       watchdog_update_hrtimer_threshold(sample_period);
 }
 
 /* Commands for resetting the watchdog */
index 295a0d84934cb1d3a9a87abd4a8ac0f1d38656a5..3a09ea1b1d3d5e6e284d058052403ac1396804ca 100644 (file)
@@ -37,6 +37,62 @@ void arch_touch_nmi_watchdog(void)
 }
 EXPORT_SYMBOL(arch_touch_nmi_watchdog);
 
+#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
+static DEFINE_PER_CPU(ktime_t, last_timestamp);
+static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
+static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;
+
+void watchdog_update_hrtimer_threshold(u64 period)
+{
+       /*
+        * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
+        *
+        * So it runs effectively with 2.5 times the rate of the NMI
+        * watchdog. That means the hrtimer should fire 2-3 times before
+        * the NMI watchdog expires. The NMI watchdog on x86 is based on
+        * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
+        * might run way faster than expected and the NMI fires in a
+        * smaller period than the one deduced from the nominal CPU
+        * frequency. Depending on the Turbo-Mode factor this might be fast
+        * enough to get the NMI period smaller than the hrtimer watchdog
+        * period and trigger false positives.
+        *
+        * The sample threshold is used to check in the NMI handler whether
+        * the minimum time between two NMI samples has elapsed. That
+        * prevents false positives.
+        *
+        * Set this to 4/5 of the actual watchdog threshold period so the
+        * hrtimer is guaranteed to fire at least once within the real
+        * watchdog threshold.
+        */
+       watchdog_hrtimer_sample_threshold = period * 2;
+}
+
+static bool watchdog_check_timestamp(void)
+{
+       ktime_t delta, now = ktime_get_mono_fast_ns();
+
+       delta = now - __this_cpu_read(last_timestamp);
+       if (delta < watchdog_hrtimer_sample_threshold) {
+               /*
+                * If ktime is jiffies based, a stalled timer would prevent
+                * jiffies from being incremented and the filter would look
+                * at a stale timestamp and never trigger.
+                */
+               if (__this_cpu_inc_return(nmi_rearmed) < 10)
+                       return false;
+       }
+       __this_cpu_write(nmi_rearmed, 0);
+       __this_cpu_write(last_timestamp, now);
+       return true;
+}
+#else
+static inline bool watchdog_check_timestamp(void)
+{
+       return true;
+}
+#endif
+
 static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
@@ -61,6 +117,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
                return;
        }
 
+       if (!watchdog_check_timestamp())
+               return;
+
        /* check for a hardlockup
         * This is done by making sure our timer interrupt
         * is incrementing.  The timer interrupt should have
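
With the default watchdog_thresh of 10s this yields concrete numbers:
get_softlockup_thresh() returns 20s, so

	sample_period = 20s / 5 = 4s	/* hrtimer period */
	threshold     = 2 * 4s  = 8s	/* minimum NMI spacing */

while the NMI period is nominally 10s. An NMI arriving less than 8s after the
previous one is treated as turbo-induced noise and ignored, up to 10 times in
a row.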
index 98fe715522e8d1834083e608d32a78ed0600deb9..c617b9d1d6cb687c93fb0c65e8d05424f5dd3c43 100644 (file)
@@ -797,6 +797,13 @@ config HARDLOCKUP_DETECTOR_PERF
        bool
        select SOFTLOCKUP_DETECTOR
 
+#
+# Enables a timestamp-based low pass filter to compensate for the perf-based
+# hard lockup detector running too fast due to turbo modes.
+#
+config HARDLOCKUP_CHECK_TIMESTAMP
+       bool
+
 #
 # arch/ can define HAVE_HARDLOCKUP_DETECTOR_ARCH to provide their own hard
 # lockup detector rather than the perf based detector.
index 7d315fdb9f13d9b17d8a2aa129c75790c7599bdb..cf7b129b0b2b08adcc1aae98f990c384761532dc 100644 (file)
@@ -110,10 +110,12 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
        if (in_task()) {
                unsigned int fail_nth = READ_ONCE(current->fail_nth);
 
-               if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))
-                       goto fail;
+               if (fail_nth) {
+                       if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+                               goto fail;
 
-               return false;
+                       return false;
+               }
        }
 
        /* No need to check any other properties if the probability is 0 */
index 6c1d678bcf8b00ff7b2d2fc70747045e6c14327a..ff9148969b9233ba7502b992b026d31e100be497 100644 (file)
@@ -485,7 +485,7 @@ static ssize_t config_show(struct device *dev,
                                config->test_driver);
        else
                len += snprintf(buf+len, PAGE_SIZE - len,
-                               "driver:\tEMTPY\n");
+                               "driver:\tEMPTY\n");
 
        if (config->test_fs)
                len += snprintf(buf+len, PAGE_SIZE - len,
@@ -493,7 +493,7 @@ static ssize_t config_show(struct device *dev,
                                config->test_fs);
        else
                len += snprintf(buf+len, PAGE_SIZE - len,
-                               "fs:\tEMTPY\n");
+                               "fs:\tEMPTY\n");
 
        mutex_unlock(&test_dev->config_mutex);
 
@@ -746,11 +746,11 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
                                                      strlen(test_str));
                break;
        case TEST_KMOD_FS_TYPE:
-               break;
                kfree_const(config->test_fs);
                config->test_driver = NULL;
                copied = config_copy_test_fs(config, test_str,
                                             strlen(test_str));
+               break;
        default:
                mutex_unlock(&test_dev->config_mutex);
                return -EINVAL;
@@ -880,10 +880,10 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
                                            int (*test_sync)(struct kmod_test_device *test_dev))
 {
        int ret;
-       long new;
+       unsigned long new;
        unsigned int old_val;
 
-       ret = kstrtol(buf, 10, &new);
+       ret = kstrtoul(buf, 10, &new);
        if (ret)
                return ret;
 
@@ -918,9 +918,9 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
                                             unsigned int max)
 {
        int ret;
-       long new;
+       unsigned long new;
 
-       ret = kstrtol(buf, 10, &new);
+       ret = kstrtoul(buf, 10, &new);
        if (ret)
                return ret;
 
@@ -1146,7 +1146,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
        struct kmod_test_device *test_dev = NULL;
        int ret;
 
-       mutex_unlock(&reg_dev_mutex);
+       mutex_lock(&reg_dev_mutex);
 
        /* int should suffice for number of devices, test for wrap */
        if (unlikely(num_test_devs + 1) < 0) {
index 9075aa54e95517cdbb1094f04e72c36357401e52..b06d9fe23a28c14f71c3263daaa84965dadeee45 100644 (file)
@@ -24,7 +24,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
 {
        unsigned long flags;
        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
-                               __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO);
+                                      __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!page)
                return NULL;
 
index 595b757bef72722fb2715afd9ab0cd1536181c9e..c03ccbc405a066038619361cf995f675dc2efaa1 100644 (file)
@@ -167,7 +167,7 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
        char name[16];
        int u32s;
 
-       sprintf(name, "cma-%s", cma->name);
+       scnprintf(name, sizeof(name), "cma-%s", cma->name);
 
        tmp = debugfs_create_dir(name, cma_debugfs_root);
 
index db1cd26d8752022b7f8b576cdff78f5412209d39..5715448ab0b53db5d8bd4b64d47706f7deaaf7a6 100644 (file)
@@ -124,9 +124,7 @@ void dump_mm(const struct mm_struct *mm)
 #ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
                "tlb_flush_pending %d\n"
-#endif
                "def_flags: %#lx(%pGv)\n",
 
                mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
@@ -158,9 +156,7 @@ void dump_mm(const struct mm_struct *mm)
 #ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
-               mm->tlb_flush_pending,
-#endif
+               atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags
        );
 }
index 86975dec0ba160feadfb8aa0d13b8f2be943638d..90731e3b7e589ea9f83c21827916917a27f71b82 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/userfaultfd_k.h>
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
+#include <linux/oom.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -550,6 +551,7 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
        struct mem_cgroup *memcg;
        pgtable_t pgtable;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+       int ret = 0;
 
        VM_BUG_ON_PAGE(!PageCompound(page), page);
 
@@ -561,9 +563,8 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 
        pgtable = pte_alloc_one(vma->vm_mm, haddr);
        if (unlikely(!pgtable)) {
-               mem_cgroup_cancel_charge(page, memcg, true);
-               put_page(page);
-               return VM_FAULT_OOM;
+               ret = VM_FAULT_OOM;
+               goto release;
        }
 
        clear_huge_page(page, haddr, HPAGE_PMD_NR);
@@ -576,13 +577,14 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 
        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_none(*vmf->pmd))) {
-               spin_unlock(vmf->ptl);
-               mem_cgroup_cancel_charge(page, memcg, true);
-               put_page(page);
-               pte_free(vma->vm_mm, pgtable);
+               goto unlock_release;
        } else {
                pmd_t entry;
 
+               ret = check_stable_address_space(vma->vm_mm);
+               if (ret)
+                       goto unlock_release;
+
                /* Deliver the page fault to userland */
                if (userfaultfd_missing(vma)) {
                        int ret;
@@ -610,6 +612,15 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
        }
 
        return 0;
+unlock_release:
+       spin_unlock(vmf->ptl);
+release:
+       if (pgtable)
+               pte_free(vma->vm_mm, pgtable);
+       mem_cgroup_cancel_charge(page, memcg, true);
+       put_page(page);
+       return ret;
+
 }
 
 /*
@@ -688,7 +699,10 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                ret = 0;
                set = false;
                if (pmd_none(*vmf->pmd)) {
-                       if (userfaultfd_missing(vma)) {
+                       ret = check_stable_address_space(vma->vm_mm);
+                       if (ret) {
+                               spin_unlock(vmf->ptl);
+                       } else if (userfaultfd_missing(vma)) {
                                spin_unlock(vmf->ptl);
                                ret = handle_userfault(vmf, VM_UFFD_MISSING);
                                VM_BUG_ON(ret & VM_FAULT_FALLBACK);
@@ -1495,6 +1509,13 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                goto clear_pmdnuma;
        }
 
+       /*
+        * The page_table_lock above provides a memory barrier
+        * with change_protection_range.
+        */
+       if (mm_tlb_flush_pending(vma->vm_mm))
+               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+
        /*
         * Migrate the THP to the requested node, returns with page unlocked
         * and access rights restored.
index a1a0ac0ad6f67ad479916fcbc43036973ddca824..31e207cb399bebd11371e46eb26f625a5b74487c 100644 (file)
@@ -4062,9 +4062,9 @@ out:
        return ret;
 out_release_unlock:
        spin_unlock(ptl);
-out_release_nounlock:
        if (vm_shared)
                unlock_page(page);
+out_release_nounlock:
        put_page(page);
        goto out;
 }
index 4dc92f138786988c4ef0f9d371ff8a48b2e6e905..db20f8436bc3c15bf05f86ccec5e7b1f80d807cc 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1038,7 +1038,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                goto out_unlock;
 
        if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
-           (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
+           (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
+                                               mm_tlb_flush_pending(mm)) {
                pte_t entry;
 
                swapped = PageSwapCache(page);
index 2cb25fe4452c279c5ff6ff74cbbfee64128d820e..bf14aea6ab709dc61666c1994718d9d244291e22 100644 (file)
@@ -285,31 +285,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
 }
 
 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
-
-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
-                                       phys_addr_t *addr)
-{
-       if (memblock.reserved.regions == memblock_reserved_init_regions)
-               return 0;
-
-       *addr = __pa(memblock.reserved.regions);
-
-       return PAGE_ALIGN(sizeof(struct memblock_region) *
-                         memblock.reserved.max);
-}
-
-phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
-                                       phys_addr_t *addr)
+/*
+ * Discard the memory and reserved arrays if they were allocated.
+ */
+void __init memblock_discard(void)
 {
-       if (memblock.memory.regions == memblock_memory_init_regions)
-               return 0;
+       phys_addr_t addr, size;
 
-       *addr = __pa(memblock.memory.regions);
+       if (memblock.reserved.regions != memblock_reserved_init_regions) {
+               addr = __pa(memblock.reserved.regions);
+               size = PAGE_ALIGN(sizeof(struct memblock_region) *
+                                 memblock.reserved.max);
+               __memblock_free_late(addr, size);
+       }
 
-       return PAGE_ALIGN(sizeof(struct memblock_region) *
-                         memblock.memory.max);
+       if (memblock.memory.regions != memblock_memory_init_regions) {
+               addr = __pa(memblock.memory.regions);
+               size = PAGE_ALIGN(sizeof(struct memblock_region) *
+                                 memblock.memory.max);
+               __memblock_free_late(addr, size);
+       }
 }
-
 #endif
 
 /**
index 3df3c04d73ab08e3bbb663f25b2e195b396d2149..e09741af816f8a6d5343546ebd23eb7f25f8ab54 100644 (file)
@@ -1611,9 +1611,13 @@ cleanup:
  * @page: the page
  *
  * This function protects unlocked LRU pages from being moved to
- * another cgroup and stabilizes their page->mem_cgroup binding.
+ * another cgroup.
+ *
+ * It ensures the lifetime of the returned memcg. The caller is
+ * responsible for the lifetime of the page; __unlock_page_memcg()
+ * is available when @page might get freed inside the locked section.
  */
-void lock_page_memcg(struct page *page)
+struct mem_cgroup *lock_page_memcg(struct page *page)
 {
        struct mem_cgroup *memcg;
        unsigned long flags;
@@ -1622,18 +1626,24 @@ void lock_page_memcg(struct page *page)
         * The RCU lock is held throughout the transaction.  The fast
         * path can get away without acquiring the memcg->move_lock
         * because page moving starts with an RCU grace period.
-        */
+        *
+        * The RCU lock also protects the memcg from being freed when
+        * the page state that is going to change is the only thing
+        * preventing the page itself from being freed. E.g. writeback
+        * doesn't hold a page reference and relies on PG_writeback to
+        * keep off truncation, migration and so forth.
+         */
        rcu_read_lock();
 
        if (mem_cgroup_disabled())
-               return;
+               return NULL;
 again:
        memcg = page->mem_cgroup;
        if (unlikely(!memcg))
-               return;
+               return NULL;
 
        if (atomic_read(&memcg->moving_account) <= 0)
-               return;
+               return memcg;
 
        spin_lock_irqsave(&memcg->move_lock, flags);
        if (memcg != page->mem_cgroup) {
@@ -1649,18 +1659,18 @@ again:
        memcg->move_lock_task = current;
        memcg->move_lock_flags = flags;
 
-       return;
+       return memcg;
 }
 EXPORT_SYMBOL(lock_page_memcg);
 
 /**
- * unlock_page_memcg - unlock a page->mem_cgroup binding
- * @page: the page
+ * __unlock_page_memcg - unlock and unpin a memcg
+ * @memcg: the memcg
+ *
+ * Unlock and unpin a memcg returned by lock_page_memcg().
  */
-void unlock_page_memcg(struct page *page)
+void __unlock_page_memcg(struct mem_cgroup *memcg)
 {
-       struct mem_cgroup *memcg = page->mem_cgroup;
-
        if (memcg && memcg->move_lock_task == current) {
                unsigned long flags = memcg->move_lock_flags;
 
@@ -1672,6 +1682,15 @@ void unlock_page_memcg(struct page *page)
 
        rcu_read_unlock();
 }
+
+/**
+ * unlock_page_memcg - unlock a page->mem_cgroup binding
+ * @page: the page
+ */
+void unlock_page_memcg(struct page *page)
+{
+       __unlock_page_memcg(page->mem_cgroup);
+}
 EXPORT_SYMBOL(unlock_page_memcg);
 
 /*
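
The returned pointer lets the unlock side survive the page being freed. The
pairing, as used by the test_clear_page_writeback() hunk later in this series:

	memcg = lock_page_memcg(page);
	/* ... final page-state change; the page may be freed right after ... */
	__unlock_page_memcg(memcg);	/* safe: never dereferences the page */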
index f65beaad319be4c597f9a071771e5f376234d753..fe2fba27ded2fab229d0ef7a4908551343d31b89 100644 (file)
@@ -68,6 +68,7 @@
 #include <linux/debugfs.h>
 #include <linux/userfaultfd_k.h>
 #include <linux/dax.h>
+#include <linux/oom.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -215,12 +216,8 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
        return true;
 }
 
-/* tlb_gather_mmu
- *     Called to initialize an (on-stack) mmu_gather structure for page-table
- *     tear-down from @mm. The @fullmm argument is used when @mm is without
- *     users and we're going to destroy the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                               unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
 
@@ -275,10 +272,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
  */
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end, bool force)
 {
        struct mmu_gather_batch *batch, *next;
 
+       if (force)
+               __tlb_adjust_range(tlb, start, end - start);
+
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
@@ -398,6 +399,34 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
+/* tlb_gather_mmu
+ *     Called to initialize an (on-stack) mmu_gather structure for page-table
+ *     tear-down from @mm. The @fullmm argument is used when @mm is without
+ *     users and we're going to destroy the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
+{
+       arch_tlb_gather_mmu(tlb, mm, start, end);
+       inc_tlb_flush_pending(tlb->mm);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+               unsigned long start, unsigned long end)
+{
+       /*
+        * If there are parallel threads doing PTE changes on the same range
+        * under a non-exclusive lock (e.g., mmap_sem read-side) but deferring
+        * the TLB flush by batching, a thread holding a stale TLB entry can
+        * fail to flush it by observing pte_none|!pte_dirty, for example. So
+        * flush the TLB forcefully if we detect parallel PTE batching threads.
+        */
+       bool force = mm_tlb_flush_nested(tlb->mm);
+
+       arch_tlb_finish_mmu(tlb, start, end, force);
+       dec_tlb_flush_pending(tlb->mm);
+}
+
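
The wrappers keep the canonical shootdown sequence unchanged for callers while
transparently maintaining the nesting counter; a sketch of the usual unmap
pattern (helper names as in mm/memory.c):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* inc_tlb_flush_pending() */
	unmap_vmas(&tlb, vma, start, end);
	tlb_finish_mmu(&tlb, start, end);	/* forced flush if nested, then dec */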
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
@@ -2865,6 +2894,7 @@ static int do_anonymous_page(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        struct mem_cgroup *memcg;
        struct page *page;
+       int ret = 0;
        pte_t entry;
 
        /* File mapping without ->vm_ops ? */
@@ -2897,6 +2927,9 @@ static int do_anonymous_page(struct vm_fault *vmf)
                                vmf->address, &vmf->ptl);
                if (!pte_none(*vmf->pte))
                        goto unlock;
+               ret = check_stable_address_space(vma->vm_mm);
+               if (ret)
+                       goto unlock;
                /* Deliver the page fault to userland, check inside PT lock */
                if (userfaultfd_missing(vma)) {
                        pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2931,6 +2964,10 @@ static int do_anonymous_page(struct vm_fault *vmf)
        if (!pte_none(*vmf->pte))
                goto release;
 
+       ret = check_stable_address_space(vma->vm_mm);
+       if (ret)
+               goto release;
+
        /* Deliver the page fault to userland, check inside PT lock */
        if (userfaultfd_missing(vma)) {
                pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2950,7 +2987,7 @@ setpte:
        update_mmu_cache(vma, vmf->address, vmf->pte);
 unlock:
        pte_unmap_unlock(vmf->pte, vmf->ptl);
-       return 0;
+       return ret;
 release:
        mem_cgroup_cancel_charge(page, memcg, false);
        put_page(page);
@@ -3224,7 +3261,7 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 int finish_fault(struct vm_fault *vmf)
 {
        struct page *page;
-       int ret;
+       int ret = 0;
 
        /* Did we COW the page? */
        if ((vmf->flags & FAULT_FLAG_WRITE) &&
@@ -3232,7 +3269,15 @@ int finish_fault(struct vm_fault *vmf)
                page = vmf->cow_page;
        else
                page = vmf->page;
-       ret = alloc_set_pte(vmf, vmf->memcg, page);
+
+       /*
+        * Check even for read faults because we might have lost our
+        * CoWed page.
+        */
+       if (!(vmf->vma->vm_flags & VM_SHARED))
+               ret = check_stable_address_space(vmf->vma->vm_mm);
+       if (!ret)
+               ret = alloc_set_pte(vmf, vmf->memcg, page);
        if (vmf->pte)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
        return ret;
@@ -3872,19 +3917,6 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
                        mem_cgroup_oom_synchronize(false);
        }
 
-       /*
-        * This mm has been already reaped by the oom reaper and so the
-        * refault cannot be trusted in general. Anonymous refaults would
-        * lose data and give a zero page instead e.g. This is especially
-        * problem for use_mm() because regular tasks will just die and
-        * the corrupted data will not be visible anywhere while kthread
-        * will outlive the oom victim and potentially propagate the date
-        * further.
-        */
-       if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
-                               && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
-               ret = VM_FAULT_SIGBUS;
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(handle_mm_fault);
index d911fa5cb2a73fe464042a59e1c2676c1129239d..618ab125228baec0810146a0638ce80ce4d50284 100644 (file)
@@ -861,11 +861,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                *policy |= (pol->flags & MPOL_MODE_FLAGS);
        }
 
-       if (vma) {
-               up_read(&current->mm->mmap_sem);
-               vma = NULL;
-       }
-
        err = 0;
        if (nmask) {
                if (mpol_store_user_nodemask(pol)) {
index 62767155187356d54d1fa7333ad402e76183ca0b..d68a41da6abb0743d6b09cc49c5c9524463715c3 100644 (file)
@@ -1937,12 +1937,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                put_page(new_page);
                goto out_fail;
        }
-       /*
-        * We are not sure a pending tlb flush here is for a huge page
-        * mapping or not. Hence use the tlb range variant
-        */
-       if (mm_tlb_flush_pending(mm))
-               flush_tlb_range(vma, mmun_start, mmun_end);
 
        /* Prepare a page as a migration target */
        __SetPageLocked(new_page);
index 4180ad8cc9c5e70c661efc8f30416af40e9c0066..bd0f409922cb2fc133f9fecba64a839380d4f937 100644 (file)
@@ -244,7 +244,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
-       set_tlb_flush_pending(mm);
+       inc_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
@@ -256,7 +256,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
-       clear_tlb_flush_pending(mm);
+       dec_tlb_flush_pending(mm);
 
        return pages;
 }
index 36454d0f96ee6b91383554c83015a2b47e66f038..3637809a18d04f9c20d1b00d70687ae1eb00b282 100644 (file)
@@ -146,22 +146,6 @@ static unsigned long __init free_low_memory_core_early(void)
                                NULL)
                count += __free_memory_core(start, end);
 
-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
-       {
-               phys_addr_t size;
-
-               /* Free memblock.reserved array if it was allocated */
-               size = get_allocated_memblock_reserved_regions_info(&start);
-               if (size)
-                       count += __free_memory_core(start, start + size);
-
-               /* Free memblock.memory array if it was allocated */
-               size = get_allocated_memblock_memory_regions_info(&start);
-               if (size)
-                       count += __free_memory_core(start, start + size);
-       }
-#endif
-
        return count;
 }
 
index 96e93b214d317baf4fb4ffbb5fcbe726e110980d..bf050ab025b76a268cd09a37173eac4f86febcb8 100644 (file)
@@ -2724,9 +2724,12 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
 int test_clear_page_writeback(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
+       struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
        int ret;
 
-       lock_page_memcg(page);
+       memcg = lock_page_memcg(page);
+       lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
        if (mapping && mapping_use_writeback_tags(mapping)) {
                struct inode *inode = mapping->host;
                struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2754,12 +2757,18 @@ int test_clear_page_writeback(struct page *page)
        } else {
                ret = TestClearPageWriteback(page);
        }
+       /*
+        * NOTE: Page might be free now! Writeback doesn't hold a page
+        * reference on its own, it relies on truncation to wait for
+        * the clearing of PG_writeback. The below can only access
+        * page state that is static across allocation cycles.
+        */
        if (ret) {
-               dec_lruvec_page_state(page, NR_WRITEBACK);
+               dec_lruvec_state(lruvec, NR_WRITEBACK);
                dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                inc_node_page_state(page, NR_WRITTEN);
        }
-       unlock_page_memcg(page);
+       __unlock_page_memcg(memcg);
        return ret;
 }
 
index fc32aa81f3593537cc2b11d5f63b5c5f517097a4..1bad301820c7a2e2729fc2f7c04e4b3694131576 100644 (file)
@@ -1584,6 +1584,10 @@ void __init page_alloc_init_late(void)
        /* Reinit limits that are based on free pages after the kernel is up */
        files_maxfiles_init();
 #endif
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+       /* Discard memblock private memory */
+       memblock_discard();
+#endif
 
        for_each_populated_zone(zone)
                set_zone_contiguous(zone);
@@ -4458,8 +4462,9 @@ long si_mem_available(void)
         * Part of the reclaimable slab consists of items that are in use,
         * and cannot be freed. Cap this estimate at the low watermark.
         */
-       available += global_page_state(NR_SLAB_RECLAIMABLE) -
-                    min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+       available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
+                    min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
+                        wmark_low);
 
        if (available < 0)
                available = 0;
@@ -4602,8 +4607,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                global_node_page_state(NR_FILE_DIRTY),
                global_node_page_state(NR_WRITEBACK),
                global_node_page_state(NR_UNSTABLE_NFS),
-               global_page_state(NR_SLAB_RECLAIMABLE),
-               global_page_state(NR_SLAB_UNRECLAIMABLE),
+               global_node_page_state(NR_SLAB_RECLAIMABLE),
+               global_node_page_state(NR_SLAB_UNRECLAIMABLE),
                global_node_page_state(NR_FILE_MAPPED),
                global_node_page_state(NR_SHMEM),
                global_page_state(NR_PAGETABLE),
@@ -7668,7 +7673,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
-               pr_info("%s: [%lx, %lx) PFNs busy\n",
+               pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
                        __func__, outer_start, end);
                ret = -EBUSY;
                goto done;
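
The switch to pr_info_ratelimited() matters because alloc_contig_range() callers can retry in a tight loop and flood the log with the busy message. A userspace model of per-second rate limiting; the kernel's actual limiter uses different, tunable parameters:

    #include <stdio.h>
    #include <time.h>

    static int ratelimited(void)
    {
            static time_t window;
            static int burst;
            time_t now = time(NULL);

            if (now != window) {
                    window = now;
                    burst = 0;
            }
            return burst++ < 10;    /* at most 10 messages per second */
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 100; i++)
                    if (ratelimited())
                            printf("PFNs busy\n");
            return 0;
    }
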
index c8993c63eb259b3a5302a058ce231d1290fc9b66..c1286d47aa1fad7fee7ea5bb865a2dc7efd672f2 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -888,10 +888,10 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                .flags = PVMW_SYNC,
        };
        int *cleaned = arg;
+       bool invalidation_needed = false;
 
        while (page_vma_mapped_walk(&pvmw)) {
                int ret = 0;
-               address = pvmw.address;
                if (pvmw.pte) {
                        pte_t entry;
                        pte_t *pte = pvmw.pte;
@@ -899,11 +899,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pte_dirty(*pte) && !pte_write(*pte))
                                continue;
 
-                       flush_cache_page(vma, address, pte_pfn(*pte));
-                       entry = ptep_clear_flush(vma, address, pte);
+                       flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
+                       entry = ptep_clear_flush(vma, pvmw.address, pte);
                        entry = pte_wrprotect(entry);
                        entry = pte_mkclean(entry);
-                       set_pte_at(vma->vm_mm, address, pte, entry);
+                       set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
                        ret = 1;
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +913,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
                                continue;
 
-                       flush_cache_page(vma, address, page_to_pfn(page));
-                       entry = pmdp_huge_clear_flush(vma, address, pmd);
+                       flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+                       entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
                        entry = pmd_wrprotect(entry);
                        entry = pmd_mkclean(entry);
-                       set_pmd_at(vma->vm_mm, address, pmd, entry);
+                       set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
                        ret = 1;
 #else
                        /* unexpected pmd-mapped page? */
@@ -926,11 +926,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                }
 
                if (ret) {
-                       mmu_notifier_invalidate_page(vma->vm_mm, address);
                        (*cleaned)++;
+                       invalidation_needed = true;
                }
        }
 
+       if (invalidation_needed) {
+               mmu_notifier_invalidate_range(vma->vm_mm, address,
+                               address + (1UL << compound_order(page)));
+       }
+
        return true;
 }
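
The structural change above, repeated for try_to_unmap_one() below, replaces one mmu_notifier_invalidate_page() call per mapping walked with a single ranged call once the walk is done. One caution for readers: 1UL << compound_order(page) is a count of pages, so used as a byte offset the computed range end undershoots a compound page; a byte-accurate end would be address + (PAGE_SIZE << compound_order(page)). A minimal userspace model of the batch-then-notify-once pattern, where notify_range() and clean_mappings() are stand-ins, not kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    static void notify_range(unsigned long start, unsigned long end)
    {
            printf("invalidate [%#lx, %#lx)\n", start, end);
    }

    static void clean_mappings(const unsigned long *addrs, int n,
                               unsigned long start, unsigned long len)
    {
            bool invalidation_needed = false;
            int i;

            for (i = 0; i < n; i++) {
                    /* ... write-protect/clean the entry at addrs[i] ... */
                    invalidation_needed = true;
            }
            if (invalidation_needed)
                    notify_range(start, start + len);   /* one call, not n */
    }

    int main(void)
    {
            unsigned long addrs[] = { 0x1000, 0x2000, 0x3000 };

            clean_mappings(addrs, 3, 0x1000, 3 * 0x1000);
            return 0;
    }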
 
@@ -1323,7 +1328,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        };
        pte_t pteval;
        struct page *subpage;
-       bool ret = true;
+       bool ret = true, invalidation_needed = false;
        enum ttu_flags flags = (enum ttu_flags)arg;
 
        /* munlock has nothing to gain from examining un-locked vmas */
@@ -1363,11 +1368,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                VM_BUG_ON_PAGE(!pvmw.pte, page);
 
                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
-               address = pvmw.address;
-
 
                if (!(flags & TTU_IGNORE_ACCESS)) {
-                       if (ptep_clear_flush_young_notify(vma, address,
+                       if (ptep_clear_flush_young_notify(vma, pvmw.address,
                                                pvmw.pte)) {
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1376,7 +1379,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                }
 
                /* Nuke the page table entry. */
-               flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+               flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
                if (should_defer_flush(mm, flags)) {
                        /*
                         * We clear the PTE but do not flush so potentially
@@ -1386,11 +1389,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * transition on a cached TLB entry is written through
                         * and traps if the PTE is unmapped.
                         */
-                       pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+                       pteval = ptep_get_and_clear(mm, pvmw.address,
+                                                   pvmw.pte);
 
                        set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
                } else {
-                       pteval = ptep_clear_flush(vma, address, pvmw.pte);
+                       pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
                }
 
                /* Move the dirty bit to the page. Now the pte is gone. */
@@ -1405,12 +1409,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (PageHuge(page)) {
                                int nr = 1 << compound_order(page);
                                hugetlb_count_sub(nr, mm);
-                               set_huge_swap_pte_at(mm, address,
+                               set_huge_swap_pte_at(mm, pvmw.address,
                                                     pvmw.pte, pteval,
                                                     vma_mmu_pagesize(vma));
                        } else {
                                dec_mm_counter(mm, mm_counter(page));
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                        }
 
                } else if (pte_unused(pteval)) {
@@ -1434,7 +1438,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                } else if (PageAnon(page)) {
                        swp_entry_t entry = { .val = page_private(subpage) };
                        pte_t swp_pte;
@@ -1460,7 +1464,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                 * If the page was redirtied, it cannot be
                                 * discarded. Remap the page to page table.
                                 */
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                                SetPageSwapBacked(page);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1468,7 +1472,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        }
 
                        if (swap_duplicate(entry) < 0) {
-                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
                                break;
@@ -1484,14 +1488,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                } else
                        dec_mm_counter(mm, mm_counter_file(page));
 discard:
                page_remove_rmap(subpage, PageHuge(page));
                put_page(page);
-               mmu_notifier_invalidate_page(mm, address);
+               invalidation_needed = true;
        }
+
+       if (invalidation_needed)
+               mmu_notifier_invalidate_range(mm, address,
+                               address + (1UL << compound_order(page)));
        return ret;
 }
 
index b0aa6075d164df9ae4766876cc823394abaebc6d..6540e598244412023db650412062604b704b58b3 100644 (file)
@@ -1022,7 +1022,11 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
                         */
                        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                spin_lock(&sbinfo->shrinklist_lock);
-                               if (list_empty(&info->shrinklist)) {
+                               /*
+                                * _careful to defend against unlocked access to
+                                * ->shrinklist in shmem_unused_huge_shrink()
+                                */
+                               if (list_empty_careful(&info->shrinklist)) {
                                        list_add_tail(&info->shrinklist,
                                                        &sbinfo->shrinklist);
                                        sbinfo->shrinklist_len++;
@@ -1817,7 +1821,11 @@ alloc_nohuge:            page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
                         * to shrink under memory pressure.
                         */
                        spin_lock(&sbinfo->shrinklist_lock);
-                       if (list_empty(&info->shrinklist)) {
+                       /*
+                        * _careful to defend against unlocked access to
+                        * ->shrinklist in shmem_unused_huge_shrink()
+                        */
+                       if (list_empty_careful(&info->shrinklist)) {
                                list_add_tail(&info->shrinklist,
                                                &sbinfo->shrinklist);
                                sbinfo->shrinklist_len++;
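
list_empty_careful() is the right primitive here because shmem_unused_huge_shrink() may be deleting the entry concurrently: unlike list_empty(), it tests both ->next and ->prev, so an entry halfway through deletion is not misreported as empty. A sketch mirroring how include/linux/list.h implements it (simplified, without the kernel's memory-ordering caveats):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static int list_empty_careful_model(const struct list_head *head)
    {
            const struct list_head *next = head->next;

            return next == head && next == head->prev;
    }

    int main(void)
    {
            struct list_head head = { &head, &head };
            struct list_head entry;

            printf("%d\n", list_empty_careful_model(&head)); /* 1: empty */

            /* model a deletion in progress: ->next restored, ->prev not yet */
            head.next = &head;
            head.prev = &entry;
            printf("%d\n", list_empty_careful_model(&head)); /* 0: not safe */
            return 0;
    }
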
index 1d3f9835f4eabe91494f48d8ace08a5e42a895f9..e8b4e31162cae8c4d8e473ae2d78769aaa55089e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5642,13 +5642,14 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
                 * A cache is never shut down before deactivation is
                 * complete, so no need to worry about synchronization.
                 */
-               return;
+               goto out;
 
 #ifdef CONFIG_MEMCG
        kset_unregister(s->memcg_kset);
 #endif
        kobject_uevent(&s->kobj, KOBJ_REMOVE);
        kobject_del(&s->kobj);
+out:
        kobject_put(&s->kobj);
 }
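
The slub change is a classic leak fix: the work item owns a reference on s->kobj, and the early return skipped the kobject_put() that drops it. A userspace model of routing every exit path through one reference-dropping label; the names are stand-ins, not the slab API:

    #include <stdio.h>

    struct obj { int refs; };

    static void put_ref(struct obj *o) { o->refs--; }

    static void teardown(struct obj *o, int still_deactivating)
    {
            if (still_deactivating)
                    goto out;   /* was a bare return: leaked the reference */
            /* ... unregister sysfs state here ... */
    out:
            put_ref(o);         /* every path drops the work item's ref */
    }

    int main(void)
    {
            struct obj o = { .refs = 1 };

            teardown(&o, 1);
            printf("refs = %d\n", o.refs);  /* 0: balanced */
            return 0;
    }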
 
index 7b07ec852e01fa931b2b302e8df5cff9f17f62d6..9ecddf568fe30e5cf1fba6db8eda3b7abe96d379 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -633,7 +633,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
-               free += global_page_state(NR_SLAB_RECLAIMABLE);
+               free += global_node_page_state(NR_SLAB_RECLAIMABLE);
 
                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
index 8698c1c86c4dbed685269eae1ecded2f5e714368..a47e3894c775646cd636c3f48bcb75fd4b8771d6 100644 (file)
@@ -1671,7 +1671,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        struct page **pages;
        unsigned int nr_pages, array_size, i;
        const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
-       const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN;
+       const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
+       const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
+                                       0 :
+                                       __GFP_HIGHMEM;
 
        nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));
@@ -1679,7 +1682,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        area->nr_pages = nr_pages;
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE) {
-               pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
+               pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
                                PAGE_KERNEL, node, area->caller);
        } else {
                pages = kmalloc_node(array_size, nested_gfp, node);
@@ -1700,9 +1703,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                }
 
                if (node == NUMA_NO_NODE)
-                       page = alloc_page(alloc_mask);
+                       page = alloc_page(alloc_mask|highmem_mask);
                else
-                       page = alloc_pages_node(node, alloc_mask, 0);
+                       page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
 
                if (unlikely(!page)) {
                        /* Successfully allocated i pages, free them in __vunmap() */
@@ -1710,7 +1713,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                        goto fail;
                }
                area->pages[i] = page;
-               if (gfpflags_allow_blocking(gfp_mask))
+               if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
                        cond_resched();
        }
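
The vmalloc hunks all encode one rule: __GFP_HIGHMEM may be added only when the caller did not pin the allocation to a DMA zone, since a GFP_DMA/GFP_DMA32 page cannot come from highmem. A sketch of the mask computation with illustrative flag values (not the kernel's):

    #include <stdio.h>

    #define GFP_DMA      0x01u
    #define GFP_DMA32    0x02u
    #define GFP_HIGHMEM  0x04u

    static unsigned int highmem_mask(unsigned int gfp_mask)
    {
            return (gfp_mask & (GFP_DMA | GFP_DMA32)) ? 0 : GFP_HIGHMEM;
    }

    int main(void)
    {
            printf("%#x\n", highmem_mask(0));       /* 0x4: highmem allowed */
            printf("%#x\n", highmem_mask(GFP_DMA)); /* 0: stay in DMA zone */
            return 0;
    }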
 
index f44fc22fd45aca4941c618abf97eb48494a67c16..6280a602604c2e0e05c57d0c2104b5f4791f6928 100644 (file)
@@ -3505,6 +3505,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                                              bpf_target_off(struct sk_buff, tc_index, 2,
                                                             target_size));
 #else
+               *target_size = 2;
                if (type == BPF_WRITE)
                        *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
                else
@@ -3520,6 +3521,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
                *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
 #else
+               *target_size = 4;
                *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
 #endif
                break;
index 9fe25bf6329691ecf0acdc35df7278b074d446c1..86bc40ba6ba5b4a006c6c2f74939b8d259da850a 100644 (file)
@@ -201,10 +201,7 @@ void dccp_destroy_sock(struct sock *sk)
 {
        struct dccp_sock *dp = dccp_sk(sk);
 
-       /*
-        * DCCP doesn't use sk_write_queue, just sk_send_head
-        * for retransmissions
-        */
+       __skb_queue_purge(&sk->sk_write_queue);
        if (sk->sk_send_head != NULL) {
                kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
index fab41de8e9837b512709b3c853ffb4831fe08317..de66ca8e620177693e55b54492404d8d3ee52197 100644 (file)
@@ -42,6 +42,9 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
        padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
 
        if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
+               if (skb_put_padto(skb, skb->len + padlen))
+                       return NULL;
+
                nskb = skb;
        } else {
                nskb = alloc_skb(NET_IP_ALIGN + skb->len +
@@ -56,13 +59,15 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
                skb_set_transport_header(nskb,
                                         skb_transport_header(skb) - skb->head);
                skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
+
+               if (skb_put_padto(nskb, nskb->len + padlen)) {
+                       kfree_skb(nskb);
+                       return NULL;
+               }
+
                kfree_skb(skb);
        }
 
-       /* skb is freed when it fails */
-       if (skb_put_padto(nskb, nskb->len + padlen))
-               return NULL;
-
        tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
        tag[0] = 0;
        tag[1] = 1 << p->dp->index; /* destination port */
index 76c2077c3f5b697bf8e0d4b030b70dde8fc70345..2e548eca34898f51316275c918bb1f0f4a63526e 100644 (file)
@@ -1731,6 +1731,13 @@ static __net_init int inet_init_net(struct net *net)
        net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
 #endif
 
+       /* Some IGMP sysctls, whose values are always used */
+       net->ipv4.sysctl_igmp_max_memberships = 20;
+       net->ipv4.sysctl_igmp_max_msf = 10;
+       /* IGMP reports for link-local multicast groups are enabled by default */
+       net->ipv4.sysctl_igmp_llm_reports = 1;
+       net->ipv4.sysctl_igmp_qrv = 2;
+
        return 0;
 }
 
index b8d18171cca33ab5dab67408c3cd11ad57f25b83..ec3a9ce281a6ffb86b62e21f7284fd7c801668f0 100644 (file)
@@ -1083,15 +1083,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (!fi)
                goto failure;
-       fib_info_cnt++;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
-               if (!fi->fib_metrics)
-                       goto failure;
+               if (unlikely(!fi->fib_metrics)) {
+                       kfree(fi);
+                       return ERR_PTR(err);
+               }
                atomic_set(&fi->fib_metrics->refcnt, 1);
-       } else
+       } else {
                fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
-
+       }
+       fib_info_cnt++;
        fi->fib_net = net;
        fi->fib_protocol = cfg->fc_protocol;
        fi->fib_scope = cfg->fc_scope;
index 28f14afd0dd3a392da3b84c5e791fffaf46ad254..498706b072fb70e1ffe6b5dba817816db5a4cfa7 100644 (file)
@@ -2974,12 +2974,6 @@ static int __net_init igmp_net_init(struct net *net)
                goto out_sock;
        }
 
-       /* Sysctl initialization */
-       net->ipv4.sysctl_igmp_max_memberships = 20;
-       net->ipv4.sysctl_igmp_max_msf = 10;
-       /* IGMP reports for link-local multicast groups are enabled by default */
-       net->ipv4.sysctl_igmp_llm_reports = 1;
-       net->ipv4.sysctl_igmp_qrv = 2;
        return 0;
 
 out_sock:
index 50c74cd890bc79ed6c85c958c5397d833e9aa74a..e153c40c2436109d4bca4a9caf34b90cbf000cd9 100644 (file)
@@ -965,11 +965,12 @@ static int __ip_append_data(struct sock *sk,
                csummode = CHECKSUM_PARTIAL;
 
        cork->length += length;
-       if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
-            (skb && skb_is_gso(skb))) &&
+       if ((skb && skb_is_gso(skb)) ||
+           (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
+           (skb_queue_len(queue) <= 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         maxfraglen, flags);
@@ -1288,6 +1289,7 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                return -EINVAL;
 
        if ((size + skb->len > mtu) &&
+           (skb_queue_len(&sk->sk_write_queue) == 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                if (skb->ip_summed != CHECKSUM_PARTIAL)
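
The reworked condition in __ip_append_data() is dense; restated as a helper it reads: take the UFO path when the pending skb is already GSO, or when the new data would push past the MTU while at most one skb is queued and the usual UDP/UFO device checks hold. A readability sketch only; use_ufo() and its parameters are hypothetical, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    static bool use_ufo(bool pending_skb_is_gso, unsigned long total_len,
                        unsigned long mtu, unsigned int queue_len,
                        bool udp_ufo_checks_ok)
    {
            if (pending_skb_is_gso)
                    return true;
            return total_len > mtu && queue_len <= 1 && udp_ufo_checks_ok;
    }

    int main(void)
    {
            printf("%d\n", use_ufo(false, 3000, 1500, 1, true)); /* 1 */
            printf("%d\n", use_ufo(false, 3000, 1500, 2, true)); /* 0 */
            return 0;
    }
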
index 0383e66f59bcef3bd6b8627edae9aa2d34139f5d..7effa62beed3fa9065ab7d6365b1d79143bfae79 100644 (file)
@@ -2750,12 +2750,13 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                err = 0;
                if (IS_ERR(rt))
                        err = PTR_ERR(rt);
+               else
+                       skb_dst_set(skb, &rt->dst);
        }
 
        if (err)
                goto errout_free;
 
-       skb_dst_set(skb, &rt->dst);
        if (rtm->rtm_flags & RTM_F_NOTIFY)
                rt->rt_flags |= RTCF_NOTIFY;
 
index a20e7f03d5f7d81ccf7e92de9bfbbcc3e2df6718..e9252c7df8091a8e0d2fc9d7e5722e9fd605a857 100644 (file)
@@ -1722,6 +1722,8 @@ process:
                 */
                sock_hold(sk);
                refcounted = true;
+               if (tcp_filter(sk, skb))
+                       goto discard_and_relse;
                nsk = tcp_check_req(sk, skb, req, false);
                if (!nsk) {
                        reqsk_put(req);
@@ -1729,8 +1731,6 @@ process:
                }
                if (nsk == sk) {
                        reqsk_put(req);
-               } else if (tcp_filter(sk, skb)) {
-                       goto discard_and_relse;
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v4_send_reset(nsk, skb);
                        goto discard_and_relse;
index 2417f55374c593c89b2aeb1ec4f6e6e74bb1395f..6bb9e14c710a7e2bfa58ee63ff6e02461a22cbec 100644 (file)
@@ -122,14 +122,14 @@ int tcp_set_ulp(struct sock *sk, const char *name)
 
        ulp_ops = __tcp_ulp_find_autoload(name);
        if (!ulp_ops)
-               err = -ENOENT;
-       else
-               err = ulp_ops->init(sk);
+               return -ENOENT;
 
-       if (err)
-               goto out;
+       err = ulp_ops->init(sk);
+       if (err) {
+               module_put(ulp_ops->owner);
+               return err;
+       }
 
        icsk->icsk_ulp_ops = ulp_ops;
- out:
-       return err;
+       return 0;
 }
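
The rewritten tcp_set_ulp() fixes an unwind bug: __tcp_ulp_find_autoload() returns with a module reference held, so a failing init(sk) must module_put() before bailing out. A userspace model of the balanced get/init/put sequence, with stand-in names:

    #include <stdio.h>

    static int module_refs;

    static int find_ops(void)
    {
            module_refs++;      /* autoload takes a module reference */
            return 0;
    }

    static void put_ops(void)
    {
            module_refs--;
    }

    static int init_ops(int fail)
    {
            return fail ? -1 : 0;
    }

    static int set_ulp(int fail)
    {
            if (find_ops() < 0)
                    return -1;
            if (init_ops(fail)) {
                    put_ops();  /* drop the reference on the error path */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            set_ulp(1);
            printf("module_refs = %d\n", module_refs);  /* 0: balanced */
            return 0;
    }
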
index e6276fa3750b909615668fddf84495369bd7d369..a7c804f73990a0610bc85c02fc2dd76858973c22 100644 (file)
@@ -802,7 +802,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
        if (is_udplite)                                  /*     UDP-Lite      */
                csum = udplite_csum(skb);
 
-       else if (sk->sk_no_check_tx) {   /* UDP csum disabled */
+       else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {   /* UDP csum off */
 
                skb->ip_summed = CHECKSUM_NONE;
                goto send;
index 162efba0d0cd851848363588318cf6ade4a5a62c..2dfe50d8d609a7a623edacbe40e93022dfac685e 100644 (file)
@@ -1381,11 +1381,12 @@ emsgsize:
         */
 
        cork->length += length;
-       if ((((length + (skb ? skb->len : headersize)) > mtu) ||
-            (skb && skb_is_gso(skb))) &&
+       if ((skb && skb_is_gso(skb)) ||
+           (((length + (skb ? skb->len : headersize)) > mtu) &&
+           (skb_queue_len(queue) <= 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-           (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+           (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen, exthdrlen,
                                          transhdrlen, mtu, flags, fl6);
index a640fbcba15dbf246e419d3e03da8eca0fa6901a..94d6a13d47f0e9ec5ff4cbc50b90d4cd9ca3f38a 100644 (file)
@@ -417,14 +417,11 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
        struct net_device *loopback_dev =
                dev_net(dev)->loopback_dev;
 
-       if (dev != loopback_dev) {
-               if (idev && idev->dev == dev) {
-                       struct inet6_dev *loopback_idev =
-                               in6_dev_get(loopback_dev);
-                       if (loopback_idev) {
-                               rt->rt6i_idev = loopback_idev;
-                               in6_dev_put(idev);
-                       }
+       if (idev && idev->dev != loopback_dev) {
+               struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
+               if (loopback_idev) {
+                       rt->rt6i_idev = loopback_idev;
+                       in6_dev_put(idev);
                }
        }
 }
@@ -3724,10 +3721,10 @@ static int ip6_route_dev_notify(struct notifier_block *this,
                /* NETDEV_UNREGISTER could be fired for multiple times by
                 * netdev_wait_allrefs(). Make sure we only call this once.
                 */
-               in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+               in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-               in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
-               in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+               in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
+               in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
 #endif
        }
 
index 2521690d62d6e591af594c3629f71f004240ed68..206210125fd71d129a9ed2ead51a536749ab62b8 100644 (file)
@@ -1456,6 +1456,8 @@ process:
                }
                sock_hold(sk);
                refcounted = true;
+               if (tcp_filter(sk, skb))
+                       goto discard_and_relse;
                nsk = tcp_check_req(sk, skb, req, false);
                if (!nsk) {
                        reqsk_put(req);
@@ -1464,8 +1466,6 @@ process:
                if (nsk == sk) {
                        reqsk_put(req);
                        tcp_v6_restore_cb(skb);
-               } else if (tcp_filter(sk, skb)) {
-                       goto discard_and_relse;
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v6_send_reset(nsk, skb);
                        goto discard_and_relse;
index ca9d3ae665e76ea847a4ce03b4d275f80d7705bc..98f4d8211b9a9d9bc26e7d9979c5c2bf27c1b344 100644 (file)
@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
 #define BROADCAST_ONE          1
 #define BROADCAST_REGISTERED   2
 #define BROADCAST_PROMISC_ONLY 4
-static int pfkey_broadcast(struct sk_buff *skb,
+static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
                           int broadcast_flags, struct sock *one_sk,
                           struct net *net)
 {
@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
        rcu_read_unlock();
 
        if (one_sk != NULL)
-               err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
+               err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
 
        kfree_skb(skb2);
        kfree_skb(skb);
@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
                hdr = (struct sadb_msg *) pfk->dump.skb->data;
                hdr->sadb_msg_seq = 0;
                hdr->sadb_msg_errno = rc;
-               pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+               pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
                                &pfk->sk, sock_net(&pfk->sk));
                pfk->dump.skb = NULL;
        }
@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
                             sizeof(uint64_t));
 
-       pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+       pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
 
        return 0;
 }
@@ -1389,7 +1389,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
 
        xfrm_state_put(x);
 
-       pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
+       pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
 
        return 0;
 }
@@ -1476,7 +1476,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
        hdr->sadb_msg_seq = c->seq;
        hdr->sadb_msg_pid = c->portid;
 
-       pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
+       pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
 
        return 0;
 }
@@ -1589,7 +1589,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
        out_hdr->sadb_msg_reserved = 0;
        out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
        out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-       pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
+       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
 
        return 0;
 }
@@ -1694,8 +1694,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
                return -ENOBUFS;
        }
 
-       pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
-
+       pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
+                       sock_net(sk));
        return 0;
 }
 
@@ -1712,7 +1712,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
        hdr->sadb_msg_errno = (uint8_t) 0;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 
-       return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+       return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
+                              sock_net(sk));
 }
 
 static int key_notify_sa_flush(const struct km_event *c)
@@ -1733,7 +1734,7 @@ static int key_notify_sa_flush(const struct km_event *c)
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
        hdr->sadb_msg_reserved = 0;
 
-       pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
+       pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
 
        return 0;
 }
@@ -1790,7 +1791,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
        out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
        if (pfk->dump.skb)
-               pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+               pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
                                &pfk->sk, sock_net(&pfk->sk));
        pfk->dump.skb = out_skb;
 
@@ -1878,7 +1879,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
                new_hdr->sadb_msg_errno = 0;
        }
 
-       pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
+       pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
        return 0;
 }
 
@@ -2206,7 +2207,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = c->seq;
        out_hdr->sadb_msg_pid = c->portid;
-       pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
+       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
        return 0;
 
 }
@@ -2426,7 +2427,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
        out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-       pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
+       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
        err = 0;
 
 out:
@@ -2682,7 +2683,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
        out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
        if (pfk->dump.skb)
-               pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+               pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
                                &pfk->sk, sock_net(&pfk->sk));
        pfk->dump.skb = out_skb;
 
@@ -2739,7 +2740,7 @@ static int key_notify_policy_flush(const struct km_event *c)
        hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
        hdr->sadb_msg_reserved = 0;
-       pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
+       pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
        return 0;
 
 }
@@ -2803,7 +2804,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
        void *ext_hdrs[SADB_EXT_MAX];
        int err;
 
-       pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
+       pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
                        BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
 
        memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@ -3024,7 +3025,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
        out_hdr->sadb_msg_seq = 0;
        out_hdr->sadb_msg_pid = 0;
 
-       pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+                       xs_net(x));
        return 0;
 }
 
@@ -3212,7 +3214,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
                       xfrm_ctx->ctx_len);
        }
 
-       return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+       return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+                              xs_net(x));
 }
 
 static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@ -3408,7 +3411,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
        n_port->sadb_x_nat_t_port_port = sport;
        n_port->sadb_x_nat_t_port_reserved = 0;
 
-       return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+       return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+                              xs_net(x));
 }
 
 #ifdef CONFIG_NET_KEY_MIGRATE
@@ -3599,7 +3603,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
        }
 
        /* broadcast migrate message to sockets */
-       pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
+       pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
 
        return 0;
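
Every af_key hunk above makes the same change: pfkey_broadcast() used to allocate with GFP_KERNEL internally, but several of its callers run in atomic context, so the allocation context is now threaded through as a parameter. A userspace model of passing the caller's context down instead of hardcoding it; the enum and names are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    enum gfp { MAY_SLEEP, ATOMIC };

    static void *alloc_ctx(size_t n, enum gfp allocation)
    {
            /* a kernel allocator must not sleep when allocation == ATOMIC */
            (void)allocation;
            return malloc(n);
    }

    static int broadcast(const char *msg, enum gfp allocation)
    {
            void *copy = alloc_ctx(64, allocation); /* was hardcoded MAY_SLEEP */

            if (!copy)
                    return -1;
            printf("broadcast: %s (%s)\n", msg,
                   allocation == ATOMIC ? "atomic" : "may sleep");
            free(copy);
            return 0;
    }

    int main(void)
    {
            broadcast("pfkey_error", MAY_SLEEP);    /* process-context caller */
            broadcast("key_notify_sa", ATOMIC);     /* atomic-context caller */
            return 0;
    }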
 
index 8708cbe8af5bbcabc5c7ea295c6a0abd15a8eb84..2b36eff5d97ea7c1cef117033c66dd35058ac4e7 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -466,3 +466,23 @@ void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif,
        rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_manage_rx_ba_offl);
+
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+                                  const u8 *addr, unsigned int tid)
+{
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct ieee80211_local *local = sdata->local;
+       struct sta_info *sta;
+
+       rcu_read_lock();
+       sta = sta_info_get_bss(sdata, addr);
+       if (!sta)
+               goto unlock;
+
+       set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
+       ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+ unlock:
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_rx_ba_timer_expired);
index 0615c2a950fab992134d0071707b5b336f6fb231..008a45ca31124ed5fa54d666fce61c7982b12a2f 100644 (file)
@@ -3700,14 +3700,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-                       return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
                if (val > INT_MAX)
                        return -EINVAL;
-               po->tp_reserve = val;
-               return 0;
+               lock_sock(sk);
+               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+                       ret = -EBUSY;
+               } else {
+                       po->tp_reserve = val;
+                       ret = 0;
+               }
+               release_sock(sk);
+               return ret;
        }
        case PACKET_LOSS:
        {
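
The packet_setsockopt() hunk closes a time-of-check/time-of-use race: the pg_vec check and the tp_reserve store now happen under the socket lock, so a concurrent ring setup cannot slip in between them. A pthread-based model of check-and-store under one lock, with a mutex standing in for lock_sock():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
    static int ring_mapped;
    static unsigned int tp_reserve;

    static int set_reserve(unsigned int val)
    {
            int ret = 0;

            pthread_mutex_lock(&sk_lock);
            if (ring_mapped)
                    ret = -1;           /* EBUSY */
            else
                    tp_reserve = val;   /* check and store under one lock */
            pthread_mutex_unlock(&sk_lock);
            return ret;
    }

    int main(void)
    {
            printf("%d\n", set_reserve(128));   /* 0: no ring mapped yet */
            ring_mapped = 1;
            printf("%d\n", set_reserve(256));   /* -1: busy */
            return 0;
    }
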
index 94ba5cfab86000f70d9939db2baaa733a5338754..d516ba8178b8099f5e8e180f2e60e7a61de37811 100644 (file)
@@ -49,9 +49,9 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
                return PTR_ERR(target);
 
        t->u.kernel.target = target;
+       memset(&par, 0, sizeof(par));
        par.net       = net;
        par.table     = table;
-       par.entryinfo = NULL;
        par.target    = target;
        par.targinfo  = t->data;
        par.hook_mask = hook;
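
The act_ipt fix replaces clearing one member with zeroing the whole on-stack parameter block before filling in the known fields, so the target's check hook never sees stale stack garbage in members this caller does not set (here, entryinfo). A minimal illustration; the struct layout is a stand-in:

    #include <stdio.h>
    #include <string.h>

    struct check_param {
            const void *net, *table, *entryinfo, *target, *targinfo;
            unsigned int hook_mask;
    };

    int main(void)
    {
            struct check_param par;

            memset(&par, 0, sizeof(par));   /* zero everything first ... */
            par.hook_mask = 1;              /* ... then set what we have */
            printf("entryinfo=%p hook_mask=%u\n",
                   (void *)par.entryinfo, par.hook_mask);
            return 0;
    }
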
index bd24a550e0f9f114598f4a398d6efd4c72672f50..a3fa144b864871088209386fd573bded1886432f 100644 (file)
@@ -286,9 +286,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 void qdisc_hash_add(struct Qdisc *q, bool invisible)
 {
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
-               struct Qdisc *root = qdisc_dev(q)->qdisc;
-
-               WARN_ON_ONCE(root == &noop_qdisc);
                ASSERT_RTNL();
                hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
                if (invisible)
index 572fe2584e48c81dbf58d90ce9d6a4ae68d2a385..c403c87aff7a44bccfdd5f07e2e00ea0698a5c90 100644 (file)
@@ -572,8 +572,10 @@ static void atm_tc_destroy(struct Qdisc *sch)
        struct atm_flow_data *flow, *tmp;
 
        pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
-       list_for_each_entry(flow, &p->flows, list)
+       list_for_each_entry(flow, &p->flows, list) {
                tcf_block_put(flow->block);
+               flow->block = NULL;
+       }
 
        list_for_each_entry_safe(flow, tmp, &p->flows, list) {
                if (flow->ref > 1)
index 481036f6b54e4730ee27fae6236277c64d3eaa1a..780db43300b16284192b24006b0ae8677adbe505 100644 (file)
@@ -1431,8 +1431,10 @@ static void cbq_destroy(struct Qdisc *sch)
         * be bound to classes which have been destroyed already. --TGR '04
         */
        for (h = 0; h < q->clhash.hashsize; h++) {
-               hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
+               hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        tcf_block_put(cl->block);
+                       cl->block = NULL;
+               }
        }
        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
index b52f74610dc7539fd7804403a6ff41b690243df7..fd15200f86273add7d6c8c4a18aaef912aba7411 100644 (file)
@@ -1428,6 +1428,10 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
                return err;
        q->eligible = RB_ROOT;
 
+       err = tcf_block_get(&q->root.block, &q->root.filter_list);
+       if (err)
+               goto err_tcf;
+
        q->root.cl_common.classid = sch->handle;
        q->root.refcnt  = 1;
        q->root.sched   = q;
@@ -1447,6 +1451,10 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
        qdisc_watchdog_init(&q->watchdog, sch);
 
        return 0;
+
+err_tcf:
+       qdisc_class_hash_destroy(&q->clhash);
+       return err;
 }
 
 static int
@@ -1522,8 +1530,10 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
        unsigned int i;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
+               hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
                        tcf_block_put(cl->block);
+                       cl->block = NULL;
+               }
        }
        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
index 203286ab442799a07808bd8f73af9f07b0482cbd..5d65ec5207e91202d501a83c983793f2e923f075 100644 (file)
@@ -1258,8 +1258,10 @@ static void htb_destroy(struct Qdisc *sch)
        tcf_block_put(q->block);
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
+               hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        tcf_block_put(cl->block);
+                       cl->block = NULL;
+               }
        }
        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
index f80ea2cc5f1f4bdee2452c930e3650f79397f3a1..82469ef9655eb65431477215bb2df0fb2737f905 100644 (file)
@@ -437,6 +437,7 @@ congestion_drop:
                qdisc_drop(head, sch, to_free);
 
                slot_queue_add(slot, skb);
+               qdisc_tree_reduce_backlog(sch, 0, delta);
                return NET_XMIT_CN;
        }
 
@@ -468,8 +469,10 @@ enqueue:
        /* Return Congestion Notification only if we dropped a packet
         * from this flow.
         */
-       if (qlen != slot->qlen)
+       if (qlen != slot->qlen) {
+               qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
                return NET_XMIT_CN;
+       }
 
        /* As we dropped a packet, better let upper stack know this */
        qdisc_tree_reduce_backlog(sch, 1, dropped);
index d174ee3254eecb523dbc86061d80a1e812dcc305..767e0537dde5a8d0cfac97f6b11ee1d568ef87ed 100644 (file)
@@ -596,7 +596,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
        rcu_read_lock();
        b = rcu_dereference_rtnl(dev->tipc_ptr);
        if (likely(b && test_bit(0, &b->up) &&
-                  (skb->pkt_type <= PACKET_BROADCAST))) {
+                  (skb->pkt_type <= PACKET_MULTICAST))) {
                skb->next = NULL;
                tipc_rcv(dev_net(dev), skb, b);
                rcu_read_unlock();
index ab3087687a32446ffa3bbfaccf206028886e6945..dcd90e6fa7c39c962eb6f6684cc31dd2b2443e4a 100644 (file)
@@ -513,6 +513,7 @@ bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 
        /* Now reverse the concerned fields */
        msg_set_errcode(hdr, err);
+       msg_set_non_seq(hdr, 0);
        msg_set_origport(hdr, msg_destport(&ohdr));
        msg_set_destport(hdr, msg_origport(&ohdr));
        msg_set_destnode(hdr, msg_prevnode(&ohdr));
index aeef8011ac7d82d828289f4085efe3acaa8a3945..9b4dcb6a16b50eefc04167dfdd1e509546b71bf6 100644 (file)
@@ -1455,10 +1455,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        /* Initiate synch mode if applicable */
        if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
                syncpt = iseqno + exp_pkts - 1;
-               if (!tipc_link_is_up(l)) {
-                       tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+               if (!tipc_link_is_up(l))
                        __tipc_node_link_up(n, bearer_id, xmitq);
-               }
                if (n->state == SELF_UP_PEER_UP) {
                        n->sync_point = syncpt;
                        tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
index a536760a94c26521a04cbe4233b1d5fa4b69e625..45c1336c6597f1b49a343b8629613fb979b0ef8b 100644 (file)
@@ -47,10 +47,10 @@ config SND_SEQ_HRTIMER_DEFAULT
          timer.
 
 config SND_SEQ_MIDI_EVENT
-       def_tristate SND_RAWMIDI
+       tristate
 
 config SND_SEQ_MIDI
-       tristate
+       def_tristate SND_RAWMIDI
        select SND_SEQ_MIDI_EVENT
 
 config SND_SEQ_MIDI_EMUL
index 272c55fe17c88aec700a7552da4473349c4ce359..ea2d0ae85bd367d5ea70068ee74d925a349789c3 100644 (file)
@@ -1502,16 +1502,11 @@ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
 static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
 {
        struct snd_seq_queue_info *info = arg;
-       int result;
        struct snd_seq_queue *q;
 
-       result = snd_seq_queue_alloc(client->number, info->locked, info->flags);
-       if (result < 0)
-               return result;
-
-       q = queueptr(result);
-       if (q == NULL)
-               return -EINVAL;
+       q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
+       if (IS_ERR(q))
+               return PTR_ERR(q);
 
        info->queue = q->queue;
        info->locked = q->locked;
@@ -1521,7 +1516,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
        if (!info->name[0])
                snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
        strlcpy(q->name, info->name, sizeof(q->name));
-       queuefree(q);
+       snd_use_lock_free(&q->use_lock);
 
        return 0;
 }
index 450c5187eecb6bb083736d2d2a1aad43b98c7c3f..79e0c5604ef806d62eca084293e66222b8fe6828 100644 (file)
@@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void)
 static void queue_use(struct snd_seq_queue *queue, int client, int use);
 
 /* allocate a new queue -
- * return queue index value or negative value for error
+ * return pointer to new queue or ERR_PTR(-errno) for error
+ * The new queue's use_lock is set to 1. It is the caller's responsibility to
+ * call snd_use_lock_free(&q->use_lock).
  */
-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
 {
        struct snd_seq_queue *q;
 
        q = queue_new(client, locked);
        if (q == NULL)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        q->info_flags = info_flags;
        queue_use(q, client, 1);
+       snd_use_lock_use(&q->use_lock);
        if (queue_list_add(q) < 0) {
+               snd_use_lock_free(&q->use_lock);
                queue_delete(q);
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        }
-       return q->queue;
+       return q;
 }
 
 /* delete a queue - queue must be owned by the client */
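
The new snd_seq_queue_alloc() contract combines two kernel idioms: errors travel back as an encoded pointer (the ERR_PTR/IS_ERR convention from include/linux/err.h), and the object is handed back with its use lock already held so it cannot be freed before the caller finishes with it. A simplified userspace model of that contract; the encoding below is a stand-in for the real one:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095UL

    static void *err_ptr(long err)      { return (void *)err; }
    static long ptr_err(const void *p)  { return (long)p; }
    static int is_err(const void *p)
    {
            return (unsigned long)p >= -MAX_ERRNO;
    }

    struct queue { int use; };

    static struct queue *queue_alloc(int fail)
    {
            struct queue *q;

            if (fail)
                    return err_ptr(-ENOMEM);
            q = calloc(1, sizeof(*q));
            if (!q)
                    return err_ptr(-ENOMEM);
            q->use = 1;         /* handed back held: caller must drop it */
            return q;
    }

    int main(void)
    {
            struct queue *q = queue_alloc(0);

            if (is_err(q))
                    return (int)-ptr_err(q);
            /* ... fill in queue info ... */
            q->use--;           /* the caller's snd_use_lock_free() step */
            free(q);
            return 0;
    }
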
index 30c8111477f61ed26987dc03abbe9670b36db221..719093489a2c4eec57fed70d4ba2b862cb64a9cf 100644 (file)
@@ -71,7 +71,7 @@ void snd_seq_queues_delete(void);
 
 
 /* create new queue (constructor) */
-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
 
 /* delete queue (destructor) */
 int snd_seq_queue_delete(int client, int queueid);
index dc585959ca32c8cfbfa3aff0557a4e20c7ec1430..a2b56b188be4d90d9c51547e5fd8a7013700b14f 100644 (file)
@@ -698,10 +698,18 @@ static int copy_gctl(struct snd_emu10k1 *emu,
 {
        struct snd_emu10k1_fx8010_control_old_gpr __user *octl;
 
-       if (emu->support_tlv)
-               return copy_from_user(gctl, &_gctl[idx], sizeof(*gctl));
+       if (emu->support_tlv) {
+               if (in_kernel)
+                       memcpy(gctl, (void *)&_gctl[idx], sizeof(*gctl));
+               else if (copy_from_user(gctl, &_gctl[idx], sizeof(*gctl)))
+                       return -EFAULT;
+               return 0;
+       }
+
        octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)_gctl;
-       if (copy_from_user(gctl, &octl[idx], sizeof(*octl)))
+       if (in_kernel)
+               memcpy(gctl, (void *)&octl[idx], sizeof(*octl));
+       else if (copy_from_user(gctl, &octl[idx], sizeof(*octl)))
                return -EFAULT;
        gctl->tlv = NULL;
        return 0;
index a91a9ef00c40611db8f19ffcd14d414cd7d42d71..217bb582aff16a6ec428311fddc53370e0355288 100644 (file)
@@ -6647,7 +6647,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
                ALC225_STANDARD_PINS,
                {0x12, 0xb7a60130},
-               {0x13, 0xb8a61140},
                {0x17, 0x90170110}),
        {}
 };
index 082736c539bc14eec73a1afad11e2a3cd5577d89..e630813c5008627d50e41637e03eeaf8ef2bf0c3 100644 (file)
@@ -542,6 +542,8 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
 
        if (size < sizeof(scale))
                return -ENOMEM;
+       if (cval->min_mute)
+               scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
        scale[2] = cval->dBmin;
        scale[3] = cval->dBmax;
        if (copy_to_user(_tlv, scale, sizeof(scale)))
index 3417ef347e40432482b84de271a3bb98c8724297..2b4b067646ab099653fe7ea79d9af1570e2971f6 100644 (file)
@@ -64,6 +64,7 @@ struct usb_mixer_elem_info {
        int cached;
        int cache_val[MAX_CHANNELS];
        u8 initialized;
+       u8 min_mute;
        void *private_data;
 };
 
index e3d1dec48ee49f21d4efe4627fc3c264d76fcfe2..e1e7ce9ab217f6f716fc95da5d1544279c0d1b75 100644 (file)
@@ -1878,6 +1878,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
                if (unitid == 7 && cval->control == UAC_FU_VOLUME)
                        snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
                break;
+       /* lowest playback value is muted on C-Media devices */
+       case USB_ID(0x0d8c, 0x000c):
+       case USB_ID(0x0d8c, 0x0014):
+               if (strstr(kctl->id.name, "Playback"))
+                       cval->min_mute = 1;
+               break;
        }
 }
 
index d7b0b0a3a2db55617a908e2fe4a8a2af90082e02..6a03f9697039cc3d157bba92d5253b328e6ad45d 100644 (file)
@@ -1142,6 +1142,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
        case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+       case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
        case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
        case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
        case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
@@ -1374,6 +1375,10 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                        }
                }
                break;
+       case USB_ID(0x16d0, 0x0a23):
+               if (fp->altsetting == 2)
+                       return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+               break;
 
        default:
                break;
index e2fbb890aef9549281318a4241a2e5a8ad68b9d1..7c647f619d63faf3eff4c135c1b302565b7f78d5 100644 (file)
@@ -14,7 +14,7 @@ all:
        done
 
 override define RUN_TESTS
-       @if [ `dirname $(OUTPUT)` = $(PWD) ]; then ./run.sh; fi
+       $(OUTPUT)/run.sh
 endef
 
 override define INSTALL_RULE
old mode 100644 (file)
new mode 100755 (executable)
index 8cecae9..7956ea3
@@ -473,8 +473,8 @@ usage()
        echo "    all     Runs all tests (default)"
        echo "    -t      Run test ID the recommended number of times"
        echo "    -w      Watch test ID run until it runs into an error"
-       echo "    -c      Run test ID once"
-       echo "    -s      Run test ID x test-count number of times"
+       echo "    -s      Run test ID once"
+       echo "    -c      Run test ID x test-count number of times"
        echo "    -l      List all test IDs"
        echo " -h|--help  Help"
        echo
old mode 100644 (file)
new mode 100755 (executable)
index e8c61830825a4ba1fedc9b4949561f5c61b185a5..22312eb4c9419a5fc3ee7054b4a0b604f7cdafce 100644 (file)
@@ -229,10 +229,9 @@ static void init_test(void)
        printf("CLOCK_MONOTONIC_RAW+CLOCK_MONOTONIC precision: %.0f ns\t\t",
               1e9 * precision);
 
-       if (precision > MAX_PRECISION) {
-               printf("[SKIP]\n");
-               ksft_exit_skip();
-       }
+       if (precision > MAX_PRECISION)
+               ksft_exit_skip("precision: %.0f ns > MAX_PRECISION: %.0f ns\n",
+                               1e9 * precision, 1e9 * MAX_PRECISION);
 
        printf("[OK]\n");
        srand(ts.tv_sec ^ ts.tv_nsec);