git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge tag '20160610_uvc_compat_for_linus' of git://git.kernel.org/pub/scm/linux/kerne...
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 11 Jun 2016 17:55:30 +0000 (10:55 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 11 Jun 2016 17:55:30 +0000 (10:55 -0700)
Pull uvc compat XU ioctl fixes from Andy Lutomirski:
 "uvc's compat XU ioctls go through tons of potentially buggy
  indirection.  The first patch removes the indirection.  The second one
  cleans up the code.

  Compile-tested only.  I have the hardware, but I have absolutely no
  idea what XU does, how to use it, what software to recompile as
  32-bit, or what to test in that software"

* tag '20160610_uvc_compat_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux:
  uvc_v4l2: Simplify compat ioctl implementation
  uvc: Forward compat ioctls to their handlers directly
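
The two patches land the XU (extension unit) compat path in a single handler: instead of translating the 32-bit uvc_xu_control_mapping/uvc_xu_control_query structures and then re-entering the generic ioctl dispatcher, uvc_v4l2_compat_ioctl32() converts the user argument and calls the native handler directly. A simplified sketch of the resulting shape (hedged: trimmed and lightly paraphrased, not the verbatim driver code; helper names as in drivers/media/usb/uvc/uvc_v4l2.c):

	static long uvc_v4l2_compat_ioctl32(struct file *file,
					    unsigned int cmd, unsigned long arg)
	{
		struct uvc_fh *handle = file->private_data;
		union {
			struct uvc_xu_control_mapping xmap;
			struct uvc_xu_control_query xqry;
		} karg;
		void __user *up = compat_ptr(arg);
		long ret;

		switch (cmd) {
		case UVCIOC_CTRL_MAP32:
			/* convert the 32-bit layout into the native one */
			ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
			if (ret)
				return ret;
			/* call the native handler directly - no indirection */
			ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
			if (ret)
				return ret;
			/* copy the result back in the 32-bit layout */
			ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
			break;

		case UVCIOC_CTRL_QUERY32:
			/* analogous get/query/put sequence for XU queries */
			ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
			if (ret)
				return ret;
			ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
			if (ret)
				return ret;
			ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
			break;

		default:
			return -ENOIOCTLCMD;
		}

		return ret;
	}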

301 files changed:
Documentation/devicetree/bindings/hwmon/ina2xx.txt
MAINTAINERS
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/abilis_tb100.dtsi
arch/arc/boot/dts/abilis_tb101.dtsi
arch/arc/boot/dts/axc001.dtsi
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/eznps.dts
arch/arc/boot/dts/nsim_700.dts
arch/arc/boot/dts/nsimosci.dts
arch/arc/boot/dts/nsimosci_hs.dts
arch/arc/boot/dts/nsimosci_hs_idu.dts
arch/arc/boot/dts/skeleton.dtsi
arch/arc/boot/dts/skeleton_hs.dtsi
arch/arc/boot/dts/skeleton_hs_idu.dtsi
arch/arc/boot/dts/vdk_axc003.dtsi
arch/arc/boot/dts/vdk_axc003_idu.dtsi
arch/arc/include/asm/atomic.h
arch/arc/include/asm/entry-compact.h
arch/arc/include/asm/mmu_context.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/processor.h
arch/arc/include/asm/smp.h
arch/arc/include/asm/spinlock.h
arch/arc/include/asm/thread_info.h
arch/arc/include/asm/uaccess.h
arch/arc/include/uapi/asm/swab.h
arch/arc/kernel/entry-compact.S
arch/arc/kernel/intc-compact.c
arch/arc/kernel/perf_event.c
arch/arc/kernel/setup.c
arch/arc/kernel/signal.c
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arm/mach-vexpress/spc.c
arch/arm64/mm/fault.c
arch/powerpc/include/asm/nohash/64/pgalloc.h
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/platforms/512x/clock-commonclk.c
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/pseries/iommu.c
arch/x86/boot/Makefile
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/intel-family.h [new file with mode: 0644]
arch/x86/include/asm/msr.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/traps.c
drivers/acpi/bus.c
drivers/acpi/ec.c
drivers/acpi/internal.h
drivers/clk/Kconfig
drivers/clk/microchip/clk-pic32mzda.c
drivers/cpufreq/intel_pstate.c
drivers/edac/edac_mc.c
drivers/edac/sb_edac.c
drivers/firmware/efi/arm-init.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/cgs_common.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nv04_fbcon.c
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nvc0_fbcon.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/hdmi5.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_regs.h
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/lm90.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/device.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/trace.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.h
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/realtek/rtlwifi/core.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/btrfs/ctree.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/free-space-cache.c
fs/btrfs/hash.c
fs/btrfs/hash.h
fs/btrfs/super.c
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/tests/btrfs-tests.h
fs/btrfs/tests/extent-buffer-tests.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/free-space-tests.c
fs/btrfs/tests/free-space-tree-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tests/qgroup-tests.c
fs/btrfs/volumes.c
fs/coredump.c
fs/dcache.c
fs/ecryptfs/kthread.c
fs/namei.c
fs/namespace.c
fs/proc/root.c
include/asm-generic/qspinlock.h
include/linux/binfmts.h
include/linux/clk-provider.h
include/linux/cpuidle.h
include/linux/efi.h
include/linux/mlx5/device.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/mlx5/vport.h
include/linux/seqlock.h
include/net/compat.h
include/net/ip_vs.h
include/net/netfilter/nf_queue.h
include/net/netns/netfilter.h
include/net/pkt_cls.h
include/net/sch_generic.h
include/rdma/ib_verbs.h
include/uapi/linux/btrfs.h
include/uapi/linux/gtp.h
include/uapi/sound/Kbuild
kernel/events/core.c
kernel/futex.c
kernel/locking/mutex.c
kernel/locking/qspinlock.c
kernel/relay.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/idle.c
kernel/sched/stats.h
kernel/trace/bpf_trace.c
mm/fadvise.c
mm/hugetlb.c
mm/kasan/kasan.c
mm/memcontrol.c
mm/swap.c
mm/swap_state.c
net/bridge/br_fdb.c
net/compat.c
net/core/gen_stats.c
net/core/net-sysfs.c
net/ipv4/udp.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/nf_dup_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/mac80211/mesh.c
net/mac80211/sta_info.h
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_irc.c
net/netfilter/nf_conntrack_sane.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_conntrack_tftp.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_queue.c
net/netfilter/x_tables.c
net/packet/af_packet.c
net/rds/rds.h
net/rds/recv.c
net/rds/send.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/threads.c
net/rxrpc/rxkad.c
net/sched/act_police.c
net/sched/cls_flower.c
net/sched/cls_u32.c
net/sched/sch_drr.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sched/sch_ingress.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_tbf.c
net/tipc/netlink_compat.c
net/wireless/core.c
net/wireless/wext-core.c
scripts/mod/file2alias.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
tools/perf/util/data-convert-bt.c
tools/perf/util/event.c
tools/perf/util/symbol.c
tools/testing/selftests/net/reuseport_bpf.c

index 9bcd5e87830d23882fa930c3b5214a94252f7957..02af0d94e9217d19e8bbec945ec10e338b6eeafd 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
        - "ti,ina220" for ina220
        - "ti,ina226" for ina226
        - "ti,ina230" for ina230
+       - "ti,ina231" for ina231
 - reg: I2C address
 
 Optional properties:
index ed42cb65a19b517f4ab680d97196def8c6b8d6bc..2ebe195951af8e61736f80064e75142c8b11d4f5 100644 (file)
@@ -3086,6 +3086,7 @@ M:        Stephen Boyd <sboyd@codeaurora.org>
 L:     linux-clk@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/clock/
 F:     drivers/clk/
 X:     drivers/clk/clkdev.c
 F:     include/linux/clk-pr*
@@ -8008,6 +8009,7 @@ Q:        http://patchwork.kernel.org/project/linux-wireless/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/net/wireless/
 F:     drivers/net/wireless/
 
 NETXEN (1/10) GbE SUPPORT
index 0dcbacfdea4b98ca7805624a5ffb59b9298c4992..0d3e59f56974816f71140e2d0c51245f92a33768 100644 (file)
@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
 config ARCH_DISCONTIGMEM_ENABLE
-       def_bool y
+       def_bool n
 
 config ARCH_FLATMEM_ENABLE
        def_bool y
@@ -186,9 +186,6 @@ if SMP
 config ARC_HAS_COH_CACHES
        def_bool n
 
-config ARC_HAS_REENTRANT_IRQ_LV2
-       def_bool n
-
 config ARC_MCIP
        bool "ARConnect Multicore IP (MCIP) Support "
        depends on ISA_ARCV2
@@ -366,25 +363,10 @@ config NODES_SHIFT
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
-       bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+       bool "Setup Timer IRQ as high Priority"
        default n
-       # Timer HAS to be high priority, for any other high priority config
-       select ARC_IRQ3_LV2
        # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
-       depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
-       bool
-
-config ARC_IRQ5_LV2
-       bool
-
-config ARC_IRQ6_LV2
-       bool
-
-endif  #ARC_COMPACT_IRQ_LEVELS
+       depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
        bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
        default y
        depends on !ARC_CANT_LLSC
 
-config ARC_STAR_9000923308
-       bool "Workaround for llock/scond livelock"
-       default n
-       depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
 config ARC_HAS_SWAPE
        bool "Insn: SWAPE (endian-swap)"
        default y
@@ -471,7 +448,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
        bool "High Memory Support"
-       select DISCONTIGMEM
+       select ARCH_DISCONTIGMEM_ENABLE
        help
          With ARC 2G:2G address split, only upper 2G is directly addressable by
          kernel. Enable this to potentially allow access to rest of 2G and PAE
index 02fabef2891cd8bf1a6eda36bcf93549eea16158..d4df6be66d583d8708585013c07020fbde28b658 100644 (file)
@@ -127,7 +127,7 @@ libs-y              += arch/arc/lib/ $(LIBGCC)
 
 boot           := arch/arc/boot
 
-#default target for make without any arguements.
+#default target for make without any arguments.
 KBUILD_IMAGE   := bootpImage
 
 all:   $(KBUILD_IMAGE)
index 3942634f805ade1545d30dd066d2ba88cdec3cac..02410b2114334466572c05e651b6227b02e415f0 100644 (file)
@@ -23,8 +23,6 @@
 
 
 / {
-       clock-frequency         = <500000000>;  /* 500 MHZ */
-
        soc100 {
                bus-frequency   = <166666666>;
 
index b0467229a5c45e9c10eb1cc2884b84b33ef2e603..f9e7686044ebee0a4c49e1d4824810e49747256e 100644 (file)
@@ -23,8 +23,6 @@
 
 
 / {
-       clock-frequency         = <500000000>;  /* 500 MHZ */
-
        soc100 {
                bus-frequency   = <166666666>;
 
index 3e02f152edcb5eac50870720be8ffe626cb1b1b6..6ae2c476ad825aee57fadb1e557b95a0bb082647 100644 (file)
@@ -15,7 +15,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <750000000>;  /* 750 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
 
index 378e455a94c4a86aa2af9a44a4d83c2cbbf771c2..14df46f141bf3450b8df0660bc0f248beef4669b 100644 (file)
@@ -14,7 +14,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 64c94b2860ab569d03d4672db417ad1e3307f86a..3d6cfa32bf5142a50ff95b6f7e69871526172deb 100644 (file)
@@ -14,7 +14,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index b89f6c3eb35208e2686cf73017a08686a6cfad5e..1e0d225791c1250252e76697187a7d0ba8eb26ec 100644 (file)
@@ -18,7 +18,6 @@
 
 / {
        compatible = "ezchip,arc-nps";
-       clock-frequency = <83333333>;   /* 83.333333 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&intc>;
index 5d5e373e0ebc579c7f734d42212965c8187fa003..63970513e4aee2ceaadc2265eae1393115a0d2b6 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsim";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
index b5b060adce8a60dbde7778f0765720bd4617e724..763d66c883da7fea61354186781a25f63d7b89fc 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsimosci";
-       clock-frequency = <20000000>;   /* 20 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
index 325e73090a18157ca286a2916c8950d760abaf8e..4eb97c584b18b666476dd4002444e044d2e8f1cd 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsimosci_hs";
-       clock-frequency = <20000000>;   /* 20 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
index ee03d71265816db05c43841d8a0e0981c6ca62c4..853f897eb2a328c9dc7fcc70c969b387dbf14918 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsimosci_hs";
-       clock-frequency = <5000000>;    /* 5 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
index 3a10cc633e2b0e200b8eacfd16ff99e56a718652..65808fe0a290be15ded0903ad347540d0f35a52e 100644 (file)
@@ -13,7 +13,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        chosen { };
index 71fd308a9298aac6435d67e6863314d26e140396..2dfe8037dfbb34ac680597619ecef27319bed1a2 100644 (file)
@@ -8,7 +8,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        chosen { };
index d1cb25a6698952b461925d5a361223c8bc3c5080..4c11079f3565a3decc7f6401a92950df16565411 100644 (file)
@@ -8,7 +8,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        chosen { };
index ad4ee43bd2ac7700ee93d1c05ee7c84e474c4829..0fd6ba985b164b7752c26544e5d4b6d9684c88be 100644 (file)
@@ -14,7 +14,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <50000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index a3cb6263c581ea8a030bad036ec1ddebfc2b25f9..82214cd7ba0ca7406483ad80ad0cac88425b7a68 100644 (file)
@@ -15,7 +15,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <50000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 5f3dcbbc0cc9c14df199c55ff9e916b64be3be0d..dd683995bc9d119136807adcbf71b189714fe359 100644 (file)
 
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF                                               \
-       unsigned int delay = 1, tmp;                                            \
-
-#define SCOND_FAIL_RETRY_ASM                                                   \
-       "       bz      4f                      \n"                             \
-       "   ; --- scond fail delay ---          \n"                             \
-       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
-       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
-       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
-       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
-       "       b       1b                      \n"     /* start over */        \
-       "4: ; --- success ---                   \n"                             \
-
-#define SCOND_FAIL_RETRY_VARS                                                  \
-         ,[delay] "+&r" (delay),[tmp] "=&r"    (tmp)                           \
-
-#else  /* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM                                                   \
-       "       bnz     1b                      \n"                             \
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
-       unsigned int val;                                               \
-       SCOND_FAIL_RETRY_VAR_DEF                                        \
+       unsigned int val;                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
-       SCOND_FAIL_RETRY_ASM                                            \
-                                                                       \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
-         SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg direct addr mode */  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v)                    \
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
 static inline int atomic_##op##_return(int i, atomic_t *v)             \
 {                                                                      \
-       unsigned int val;                                               \
-       SCOND_FAIL_RETRY_VAR_DEF                                        \
+       unsigned int val;                                               \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
-       SCOND_FAIL_RETRY_ASM                                            \
-                                                                       \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val)                                           \
-         SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter),                                  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
index e0e1faf03c50177f69232f2990dd5e4a4ac19b6b..14c310f2e0b198ecac4d4d9f19a33d80b258005b 100644 (file)
@@ -76,8 +76,8 @@
         * We need to be a bit more cautious here. What if a kernel bug in
         * L1 ISR, caused SP to go whaco (some small value which looks like
         * USER stk) and then we take L2 ISR.
-        * Above brlo alone would treat it as a valid L1-L2 sceanrio
-        * instead of shouting alound
+        * Above brlo alone would treat it as a valid L1-L2 scenario
+        * instead of shouting around
         * The only feasible way is to make sure this L2 happened in
         * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
         * L1 ISR before it switches stack
index 1fd467ef658fe861b34f36247674d8f3e167acad..b0b87f2447f5248b3b2bcf1e13b14a5ad85d2d44 100644 (file)
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
                local_flush_tlb_all();
 
                /*
-                * Above checke for rollover of 8 bit ASID in 32 bit container.
+                * Above check for rollover of 8 bit ASID in 32 bit container.
                 * If the container itself wrapped around, set it to a non zero
                 * "generation" to distinguish from no context
                 */
index 034bbdc0ff61c3bffadd0284d776240d8137767d..858f98ef7f1ba3b1fd1ccda9298f2484f7749418 100644 (file)
@@ -47,7 +47,7 @@
  * Page Tables are purely for Linux VM's consumption and the bits below are
  * suited to that (uniqueness). Hence some are not implemented in the TLB and
  * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
  *      seperate PD0 and PD1, which combined forms a translation entry)
  *      while for PTE perspective, they are 8 and 9 respectively
  * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
index f9048994b22f857a487205de5b5a25c4d3dc1f5f..16b630fbeb6acfb09fc59d20c4f965a0bb3e59aa 100644 (file)
@@ -78,7 +78,7 @@ struct task_struct;
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk)         (tsk->thread.ksp)
index 991380438d6bfd6af03e6fff4748b8c5342b6a70..89fdd1b0a76ebe672094daa10134204cb96cf925 100644 (file)
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
  * (1) These insn were introduced only in 4.10 release. So for older released
  *     support needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
  *     gaurantted by the platform (not something which core handles).
  *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *     disabling for atomicity.
index 800e7c430ca589724c9467a3a9689aad6faf6bc3..cded4a9b543822d5ff9e2099fb05c67bdfc8e84a 100644 (file)
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
        smp_mb();
 }
 
-#else  /* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF                                               \
-       unsigned int delay, tmp;                                                \
-
-#define SCOND_FAIL_RETRY_ASM                                                   \
-       "   ; --- scond fail delay ---          \n"                             \
-       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
-       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
-       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
-       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
-       "       b       1b                      \n"     /* start over */        \
-       "                                       \n"                             \
-       "4: ; --- done ---                      \n"                             \
-
-#define SCOND_FAIL_RETRY_VARS                                                  \
-         ,[delay] "=&r" (delay), [tmp] "=&r"   (tmp)                           \
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-       unsigned int val;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[slock]]      \n"
-       "       breq    %[val], %[LOCKED], 0b   \n"     /* spin while LOCKED */
-       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
-       "       bz      4f                      \n"     /* done */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val)
-         SCOND_FAIL_RETRY_VARS
-       : [slock]       "r"     (&(lock->slock)),
-         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-       unsigned int val, got_it = 0;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[slock]]      \n"
-       "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
-       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
-       "       bz.d    4f                      \n"
-       "       mov.z   %[got_it], 1            \n"     /* got it */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val),
-         [got_it]      "+&r"   (got_it)
-         SCOND_FAIL_RETRY_VARS
-       : [slock]       "r"     (&(lock->slock)),
-         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
-       : "memory", "cc");
-
-       smp_mb();
-
-       return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-       smp_mb();
-
-       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-       smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       /*
-        * zero means writer holds the lock exclusively, deny Reader.
-        * Otherwise grant lock to first/subseq reader
-        *
-        *      if (rw->counter > 0) {
-        *              rw->counter--;
-        *              ret = 1;
-        *      }
-        */
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brls    %[val], %[WR_LOCKED], 0b\n"     /* <= 0: spin while write locked */
-       "       sub     %[val], %[val], 1       \n"     /* reader lock */
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz      4f                      \n"     /* done */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-       unsigned int val, got_it = 0;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
-       "       sub     %[val], %[val], 1       \n"     /* counter-- */
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz.d    4f                      \n"
-       "       mov.z   %[got_it], 1            \n"     /* got it */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val),
-         [got_it]      "+&r"   (got_it)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-
-       return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       /*
-        * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-        * deny writer. Otherwise if unlocked grant to writer
-        * Hence the claim that Linux rwlocks are unfair to writers.
-        * (can be starved for an indefinite time by readers).
-        *
-        *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-        *              rw->counter = 0;
-        *              ret = 1;
-        *      }
-        */
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brne    %[val], %[UNLOCKED], 0b \n"     /* while !UNLOCKED spin */
-       "       mov     %[val], %[WR_LOCKED]    \n"
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz      4f                      \n"
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-       unsigned int val, got_it = 0;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
-       "       mov     %[val], %[WR_LOCKED]    \n"
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz.d    4f                      \n"
-       "       mov.z   %[got_it], 1            \n"     /* got it */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val),
-         [got_it]      "+&r"   (got_it)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-
-       return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-
-       smp_mb();
-
-       /*
-        * rw->counter++;
-        */
-       __asm__ __volatile__(
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       add     %[val], %[val], 1       \n"
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bnz     1b                      \n"
-       "                                       \n"
-       : [val]         "=&r"   (val)
-       : [rwlock]      "r"     (&(rw->counter))
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-
-       smp_mb();
-
-       /*
-        * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-        */
-       __asm__ __volatile__(
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       scond   %[UNLOCKED], [%[rwlock]]\n"
-       "       bnz     1b                      \n"
-       "                                       \n"
-       : [val]         "=&r"   (val)
-       : [rwlock]      "r"     (&(rw->counter)),
-         [UNLOCKED]    "r"     (__ARCH_RW_LOCK_UNLOCKED__)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif /* CONFIG_ARC_STAR_9000923308 */
-
 #else  /* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
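
The block removed above implemented the exponential-backoff workaround described in its comment: on an scond failure the core spins for 'delay' iterations, doubles the delay, and retries the whole llock/scond sequence from the top. The same pattern in plain C (an illustrative sketch only; try_acquire() stands in for the inline-asm llock/scond attempt and is not a real kernel primitive):

	static inline void spin_lock_with_backoff(arch_spinlock_t *lock)
	{
		unsigned int delay = 1;

		/* retry until the scond (store-conditional) succeeds */
		while (!try_acquire(lock)) {
			unsigned int tmp = delay;

			while (tmp--)		/* the brne.d/sub loop: spin 'delay' times */
				cpu_relax();
			delay <<= 1;		/* the rol: double the delay per failure */
		}
	}
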
index 3af67455659af49d9f9d53b67ac26024cf3b77b9..2d79e527fa50a305e932259d336423d9d89c3fae 100644 (file)
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 
 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
  * syscall, so all that reamins to be tested is _TIF_WORK_MASK
  */
 
index d1da6032b715a7fea35d71fcedeeeb8cdfe590e4..a78d5670884f32490d2341b2468bf606258b6654 100644 (file)
@@ -32,7 +32,7 @@
 #define __kernel_ok            (segment_eq(get_fs(), KERNEL_DS))
 
 /*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
  *     (start < TASK_SIZE) && (start+len < TASK_SIZE)
  * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
  * emitted directly in code.
index 095599a73195c35bd6aabbc301bfe8dcafd3d52a..71f3918b0fc304bd0eeadb024bcbf9fa3ba32418 100644 (file)
@@ -74,7 +74,7 @@
        __tmp ^ __in;                                           \
 })
 
-#elif (ARC_BSWAP_TYPE == 2)    /* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2)    /* Custom single cycle bswap instruction */
 
 #define __arch_swab32(x)                                               \
 ({                                                                     \
index 0cb0abaa0479e53ab1ea7ef8e45ab0cd42d2e965..98812c1248dfaf85b28ed023287ae78f27286073 100644 (file)
@@ -91,27 +91,13 @@ VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
 VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
 
 ; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-VECTOR   handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
 VECTOR   handle_interrupt_level2
 #else
 VECTOR   handle_interrupt_level1
 #endif
 
-.rept   25
+.rept   28
 VECTOR   handle_interrupt_level1 ; Other devices
 .endr
 
index c5cceca36118744f8c06359bc060f250974313df..ce9deb953ca90e1a0309ccd69f9608e8fd32cf70 100644 (file)
@@ -28,10 +28,8 @@ void arc_init_IRQ(void)
 {
        int level_mask = 0;
 
-       /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+       /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+       level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
 
        /*
         * Write to register, even if no LV2 IRQs configured to reset it
index 6fd48021324b50acd326a4ce9a17031f05db7609..08f03d9b5b3e544fcaf3e1696b8ea8e87cb943af 100644 (file)
@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
        int64_t delta = new_raw_count - prev_raw_count;
 
        /*
-        * We don't afaraid of hwc->prev_count changing beneath our feet
+        * We aren't afraid of hwc->prev_count changing beneath our feet
         * because there's no way for us to re-enter this function anytime.
         */
        local64_set(&hwc->prev_count, new_raw_count);
index f63b8bfefb0ca789dcbac3ad0459dfc4b3e98760..2ee7a4d758a882a2b4e36cacd74adb280648fced 100644 (file)
@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
                /*
                 * If we are here, it is established that @uboot_arg didn't
                 * point to DT blob. Instead if u-boot says it is cmdline,
-                * Appent to embedded DT cmdline.
+                * append to embedded DT cmdline.
                 * setup_machine_fdt() would have populated @boot_command_line
                 */
                if (uboot_tag == 1) {
index 004b7f0bc76cc58c6988547df1bb8705cd36004d..6cb3736b6b83613a95180cd3e9cb5ba12a9b2f7f 100644 (file)
@@ -34,7 +34,7 @@
  *  -ViXS were still seeing crashes when using insmod to load drivers.
  *   It turned out that the code to change Execute permssions for TLB entries
  *   of user was not guarded for interrupts (mod_tlb_permission)
- *   This was cauing TLB entries to be overwritten on unrelated indexes
+ *   This was causing TLB entries to be overwritten on unrelated indexes
  *
  * Vineetg: July 15th 2008: Bug #94183
  *  -Exception happens in Delay slot of a JMP, and before user space resumes,
index a6f91e88ce36e3ea2a2c95d8eabdceffa48be7fc..934150e7ac4895ef9e4523fd1ffabbdacce2dd74 100644 (file)
@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
 static ssize_t tlb_stats_output(struct file *file,     /* file descriptor */
                                char __user *user_buf,  /* user buffer */
                                size_t len,             /* length of buffer */
index 9e5eddbb856f1f32dd25f4b13132dd527725249c..5a294b2c3cb307ac591293afae8c8a72409c97c5 100644 (file)
@@ -215,7 +215,7 @@ slc_chk:
  * ------------------
  * This ver of MMU supports variable page sizes (1k-16k): although Linux will
  * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
  * meaning more vaddr bits needed to disambiguate the cache-line-op ;
  * the existing scheme of piggybacking won't work for certain configurations.
  * Two new registers IC_PTAG and DC_PTAG inttoduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 
        /*
         * This is technically for MMU v4, using the MMU v3 programming model
-        * Special work for HS38 aliasing I-cache configuratino with PAE40
+        * Special work for HS38 aliasing I-cache configuration with PAE40
         *   - upper 8 bits of paddr need to be written into PTAG_HI
         *   - (and needs to be written before the lower 32 bits)
         * Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@ void arc_cache_init(void)
                              ic->ver, CONFIG_ARC_MMU_VER);
 
                /*
-                * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+                * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
                 * pair to provide vaddr/paddr respectively, just as in MMU v3
                 */
                if (is_isa_arcv2() && ic->alias)
index 8c8e36fa5659411c6705e46d538e7c506fc42c96..73d7e4c75b7dbc9140604b334ab5e22a8cdd7a8d 100644 (file)
@@ -10,7 +10,7 @@
  * DMA Coherent API Notes
  *
  * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
  * Cache bit off in the TLB entry.
  *
  * The default DMA address == Phy address which is 0x8000_0000 based.
index 5766ce2be32bbd28452c95ea9e43e92de0f4dcb0..8409cab3f760cc798a14cadc5f017feab53d1b65 100644 (file)
@@ -547,7 +547,7 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
 
        init.name = dev_name(cpu_dev);
        init.ops = &clk_spc_ops;
-       init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+       init.flags = CLK_GET_RATE_NOCACHE;
        init.num_parents = 0;
 
        return devm_clk_register(cpu_dev, &spc->hw);
index 5954881a35ac516aed1bfb0844249feb551f40b3..ba3fc12bd2722b469ec31da5e133f0e7cbf92715 100644 (file)
@@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
         * PTE_RDONLY is cleared by default in the asm below, so set it in
         * back if necessary (read-only or clean PTE).
         */
-       if (!pte_write(entry) || !dirty)
+       if (!pte_write(entry) || !pte_sw_dirty(entry))
                pte_val(entry) |= PTE_RDONLY;
 
        /*
index 0c12a3bfe2ab10d49f76e24c4d1dbaa59096b2b0..069369f6414bd1443586e84eae65b259b596a515 100644 (file)
@@ -172,7 +172,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-       pte_fragment_fre((unsigned long *)pte, 1);
+       pte_fragment_free((unsigned long *)pte, 1);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
index ccd2037c797f9e48c82b545ed4eb5febf03d95f6..6ee4b72cda4201840cf2b85f38661831c5e37233 100644 (file)
@@ -719,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
         * must match by the macro below. Update the definition if
         * the structure layout changes.
         */
-#define IBM_ARCH_VEC_NRCORES_OFFSET    125
+#define IBM_ARCH_VEC_NRCORES_OFFSET    133
        W(NR_CPUS),                     /* number of cores supported */
        0,
        0,
index 30a03c03fe734a8c80828fc4260fe3679ab63e85..060b140f03c69de424e40e4dcdb5064d832ce889 100644 (file)
@@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 
 #else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-                    offsetof(struct thread_fp_state, fpr[32][0]));
+                    offsetof(struct thread_fp_state, fpr[32]));
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fp_state, 0, -1);
@@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
        return 0;
 #else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-                    offsetof(struct thread_fp_state, fpr[32][0]));
+                    offsetof(struct thread_fp_state, fpr[32]));
 
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fp_state, 0, -1);
index d873f6507f7210fe4b9a7caa774861b9648ba5aa..40e05e7f43de31586a8110e81069ccce993ee8f3 100644 (file)
@@ -550,7 +550,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                }
        }
        /* This works for all page sizes, and for 256M and 1T segments */
-       *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+       if (cpu_has_feature(CPU_FTR_ARCH_300))
+               *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
+       else
+               *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+
        shift = mmu_psize_defs[size].shift;
 
        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
index 0fdaf93a3e091be64d827bd376f651a6c3bab8f0..54efba2fd66ed94ea70f2dd92c2413ebbf879a74 100644 (file)
@@ -117,7 +117,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
  */
 void radix__local_flush_tlb_mm(struct mm_struct *mm)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm->context.id;
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(radix__local_flush_tlb_mm);
 void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                            unsigned long ap, int nid)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm ? mm->context.id : 0;
@@ -160,7 +160,7 @@ static int mm_is_core_local(struct mm_struct *mm)
 
 void radix__flush_tlb_mm(struct mm_struct *mm)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm->context.id;
@@ -185,7 +185,7 @@ EXPORT_SYMBOL(radix__flush_tlb_mm);
 void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                       unsigned long ap, int nid)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm ? mm->context.id : 0;
index c50ea76ba66ceb95c99555f7391b75c0744a1b96..6081fbd75330b8a936a6e590b7fcb9604c4a23cb 100644 (file)
@@ -221,7 +221,7 @@ static bool soc_has_mclk_mux0_canin(void)
 /* convenience wrappers around the common clk API */
 static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
 {
-       return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+       return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
 }
 
 static inline struct clk *mpc512x_clk_factor(
index 84fb984f29c130f89cffbbaaf1aae9b8b1d6c843..85c85eb3e245d7a7a9600a8fce95a592789f38bc 100644 (file)
@@ -172,7 +172,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
        if (rc < 0)
                goto out;
 
-       skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
+       skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
        if (!dump_skip(cprm, skip))
                goto Eio;
 out:
index b7dfc1359d0113d570fe9ea7f702c728814dc66f..3e8865b187de22187a39e1bc66b662e464009f14 100644 (file)
@@ -927,7 +927,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
        dn = pci_device_to_OF_node(dev);
        pdn = PCI_DN(dn);
        buid = pdn->phb->buid;
-       cfg_addr = (pdn->busno << 8) | pdn->devfn;
+       cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
 
        ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
                  cfg_addr, BUID_HI(buid), BUID_LO(buid));
@@ -956,7 +956,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
        dn = pci_device_to_OF_node(dev);
        pdn = PCI_DN(dn);
        buid = pdn->phb->buid;
-       cfg_addr = (pdn->busno << 8) | pdn->devfn;
+       cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
 
        do {
                /* extra outputs are LIOBN and dma-addr (hi, lo) */
index 700a9c6e6159362d9b62631d124d6703a6554ba5..be8e688fa0d48b1fe4aa5180c6afa2c2fb4e84f1 100644 (file)
@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
        for i in lib lib64 share end ; do \
                if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
                        cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+                       if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
+                               cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
+                       fi ; \
                        break ; \
                fi ; \
                if [ $$i = end ] ; then exit 1 ; fi ; \
index 99c4bab123cdae71b14ab92241d81dbf15616fe1..e30eef4f29a6f6034e46beab4a559f9994311c34 100644 (file)
@@ -714,7 +714,7 @@ static void cleanup_rapl_pmus(void)
        int i;
 
        for (i = 0; i < rapl_pmus->maxpkg; i++)
-               kfree(rapl_pmus->pmus + i);
+               kfree(rapl_pmus->pmus[i]);
        kfree(rapl_pmus);
 }
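
The cleanup_rapl_pmus() fix above corrects a pointer-arithmetic slip: rapl_pmus->pmus is an array of struct rapl_pmu pointers, so 'pmus + i' is the address of the i-th slot inside the rapl_pmus allocation itself, whereas 'pmus[i]' is the separately allocated per-package object that slot points to. Minimal illustration (assuming that layout, which the fix implies):

	struct rapl_pmu *p = kzalloc(sizeof(*p), GFP_KERNEL);

	rapl_pmus->pmus[i] = p;

	kfree(rapl_pmus->pmus + i);	/* bug: frees &pmus[i], a slot inside
					 * the enclosing rapl_pmus allocation */
	kfree(rapl_pmus->pmus[i]);	/* fix: frees the object p itself */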
 
index b2625867ebd17543401cf6dbc5bf9e76fcd6ee6c..874e8bd64d1d54cdf662c5ba348f8872a0265e24 100644 (file)
@@ -2868,27 +2868,10 @@ static struct intel_uncore_type bdx_uncore_cbox = {
        .format_group           = &hswep_uncore_cbox_format_group,
 };
 
-static struct intel_uncore_type bdx_uncore_sbox = {
-       .name                   = "sbox",
-       .num_counters           = 4,
-       .num_boxes              = 4,
-       .perf_ctr_bits          = 48,
-       .event_ctl              = HSWEP_S0_MSR_PMON_CTL0,
-       .perf_ctr               = HSWEP_S0_MSR_PMON_CTR0,
-       .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
-       .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
-       .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
-       .ops                    = &hswep_uncore_sbox_msr_ops,
-       .format_group           = &hswep_uncore_sbox_format_group,
-};
-
-#define BDX_MSR_UNCORE_SBOX    3
-
 static struct intel_uncore_type *bdx_msr_uncores[] = {
        &bdx_uncore_ubox,
        &bdx_uncore_cbox,
        &hswep_uncore_pcu,
-       &bdx_uncore_sbox,
        NULL,
 };
 
@@ -2897,10 +2880,6 @@ void bdx_uncore_cpu_init(void)
        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        uncore_msr_uncores = bdx_msr_uncores;
-
-       /* BDX-DE doesn't have SBOX */
-       if (boot_cpu_data.x86_model == 86)
-               uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
 }
 
 static struct intel_uncore_type bdx_uncore_ha = {
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
new file mode 100644 (file)
index 0000000..6999f7d
--- /dev/null
@@ -0,0 +1,68 @@
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them.  There's no processor called "WESTMERE2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH          0x0E
+#define INTEL_FAM6_CORE2_MEROM         0x0F
+#define INTEL_FAM6_CORE2_MEROM_L       0x16
+#define INTEL_FAM6_CORE2_PENRYN                0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON    0x1D
+
+#define INTEL_FAM6_NEHALEM             0x1E
+#define INTEL_FAM6_NEHALEM_EP          0x1A
+#define INTEL_FAM6_NEHALEM_EX          0x2E
+#define INTEL_FAM6_WESTMERE            0x25
+#define INTEL_FAM6_WESTMERE2           0x1F
+#define INTEL_FAM6_WESTMERE_EP         0x2C
+#define INTEL_FAM6_WESTMERE_EX         0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE         0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X       0x2D
+#define INTEL_FAM6_IVYBRIDGE           0x3A
+#define INTEL_FAM6_IVYBRIDGE_X         0x3E
+
+#define INTEL_FAM6_HASWELL_CORE                0x3C
+#define INTEL_FAM6_HASWELL_X           0x3F
+#define INTEL_FAM6_HASWELL_ULT         0x45
+#define INTEL_FAM6_HASWELL_GT3E                0x46
+
+#define INTEL_FAM6_BROADWELL_CORE      0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D    0x56
+#define INTEL_FAM6_BROADWELL_GT3E      0x47
+#define INTEL_FAM6_BROADWELL_X         0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE      0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP     0x5E
+#define INTEL_FAM6_SKYLAKE_X           0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE     0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP    0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW       0x1C
+#define INTEL_FAM6_ATOM_LINCROFT       0x26
+#define INTEL_FAM6_ATOM_PENWELL                0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW     0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW      0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1    0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2    0x4D /* Avaton/Rangely */
+#define INTEL_FAM6_ATOM_AIRMONT                0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1    0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2    0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_GOLDMONT       0x5C
+#define INTEL_FAM6_ATOM_DENVERTON      0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL                0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
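
The new header gives the bare family-6 model numbers scattered around arch/x86 symbolic names. A hypothetical consumer (setup_bdx_quirk() is illustrative only; the boot_cpu_data fields are the kernel's standard CPUID identification):

	#include <asm/intel-family.h>

	/* apply a model-specific quirk on Broadwell-EP/EX parts */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
		setup_bdx_quirk();
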
index 7dc1d8fef7fdec6a633f5cd3f7ada4d2ba09e8fe..b5fee97813cdf8d1408aa7ac874b21593eef8121 100644 (file)
@@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr,
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
                     : : "c" (msr), "a"(low), "d" (high) : "memory");
-       if (msr_tracepoint_active(__tracepoint_read_msr))
+       if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
@@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
                     : "c" (msr), "0" (low), "d" (high),
                       [fault] "i" (-EIO)
                     : "memory");
-       if (msr_tracepoint_active(__tracepoint_read_msr))
+       if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), err);
        return err;
 }
index 84e33ff5a6d595693c3718477f168f20d5546316..446702ed99dca3edf5a8a48cccb6c2475ae9e85f 100644 (file)
@@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
                res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
                mem += IOAPIC_RESOURCE_NAME_SIZE;
+               ioapics[i].iomem_res = &res[num];
                num++;
-               ioapics[i].iomem_res = res;
        }
 
        ioapic_resources = res;
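Annotation: the removed line stored the array base, so every IOAPIC's iomem_res aliased the first element; the fix takes the address of the current slot before num is advanced. In miniature (a hypothetical loop, same idiom):

        struct resource *res;   /* one entry per IOAPIC */

        for (i = 0; i < nr_ioapics; i++)
                ioapics[i].iomem_res = &res[i]; /* was "= res", i.e. &res[0] for every i */
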
index c343a54bed396d2f924d4ed6d8e5ec2188c636b0..f5c69d8974e176e44a995bd6f91512c7d030b001 100644 (file)
@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
        u64 value;
 
        /* re-enable TopologyExtensions if switched off by BIOS */
-       if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+       if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
                if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
                        if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-                               pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+                               pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }
index d1590486204a1bb52974c27793dc66139f20c4ee..00f03d82e69acfbb7c90a7d8d859516088a43e6d 100644 (file)
@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
                local_irq_disable();
 }
 
+/*
+ * In IST context, we explicitly disable preemption.  This serves two
+ * purposes: it makes accidental scheduling in IST context much less
+ * likely, and it forces a warning if we somehow manage to schedule
+ * by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
        if (user_mode(regs)) {
@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
                rcu_nmi_enter();
        }
 
-       /*
-        * We are atomic because we're on the IST stack; or we're on
-        * x86_32, in which case we still shouldn't schedule; or we're
-        * on x86_64 and entered from user mode, in which case we're
-        * still atomic unless ist_begin_non_atomic is called.
-        */
-       preempt_count_add(HARDIRQ_OFFSET);
+       preempt_disable();
 
        /* This code is a bit fragile.  Test it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-       preempt_count_sub(HARDIRQ_OFFSET);
+       preempt_enable_no_resched();
 
        if (!user_mode(regs))
                rcu_nmi_exit();
@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
        BUG_ON((unsigned long)(current_top_of_stack() -
                               current_stack_pointer()) >= THREAD_SIZE);
 
-       preempt_count_sub(HARDIRQ_OFFSET);
+       preempt_enable_no_resched();
 }
 
 /**
@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
  */
 void ist_end_non_atomic(void)
 {
-       preempt_count_add(HARDIRQ_OFFSET);
+       preempt_disable();
 }
 
 static nokprobe_inline int
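Annotation: preempt_disable() raises the count by an ordinary preemption level rather than HARDIRQ_OFFSET, so an accidental schedule in IST context now trips the scheduler's in-atomic warning instead of silently running with a synthetic hardirq count. A hedged sketch of how a handler is expected to pair these helpers; the trap name and body are illustrative only:

        dotraplinkage void do_example_trap(struct pt_regs *regs, long error_code)
        {
                ist_enter(regs);                /* preemption disabled from here on */

                if (user_mode(regs)) {
                        ist_begin_non_atomic(regs);     /* sleeping allowed in this window */
                        /* ... work that may schedule ... */
                        ist_end_non_atomic();
                }

                ist_exit(regs);
        }
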
index 31e8da648fffb9b6db10cda340bcc394f3a95994..262ca31b86d9311c05bd840c7fb8bb0aa3b09b34 100644 (file)
@@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
         * Maybe EC region is required at bus_scan/acpi_get_devices. So it
         * is necessary to enable it as early as possible.
         */
-       acpi_boot_ec_enable();
+       acpi_ec_dsdt_probe();
 
        printk(KERN_INFO PREFIX "Interpreter enabled\n");
 
index 0e70181f150c9eae5e00b8f9a24a41382ee41bbc..73c76d646064700dc1bb51905b4dbc70b415aabf 100644 (file)
@@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
        return AE_OK;
 }
 
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+       {"PNP0C09", 0},
+       {"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
 {
-       if (!boot_ec)
+       acpi_status status;
+
+       if (boot_ec)
                return 0;
+
+       /*
+        * Find the EC from the DSDT if no ECDT EC is available. By the
+        * time this function is invoked, the ACPI tables have been fully
+        * loaded, so it is safe to walk the namespace now.
+        */
+       boot_ec = make_acpi_ec();
+       if (!boot_ec)
+               return -ENOMEM;
+       status = acpi_get_devices(ec_device_ids[0].id,
+                                 ec_parse_device, boot_ec, NULL);
+       if (ACPI_FAILURE(status) || !boot_ec->handle)
+               return -ENODEV;
        if (!ec_install_handlers(boot_ec)) {
                first_ec = boot_ec;
                return 0;
@@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void)
        return -EFAULT;
 }
 
-static const struct acpi_device_id ec_device_ids[] = {
-       {"PNP0C09", 0},
-       {"", 0},
-};
-
 #if 0
 /*
  * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
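Annotation: acpi_get_devices() walks the namespace and calls the supplied handler for every device whose _HID matches "PNP0C09", with boot_ec passed through as context. A sketch of the callback shape the probe relies on, assuming ec_parse_device follows the standard acpi_walk_callback signature; its real body parses the EC's resources:

        static acpi_status ec_parse_device(acpi_handle handle, u32 level,
                                           void *context, void **retval)
        {
                struct acpi_ec *ec = context;   /* boot_ec from the caller */

                /* parse _CRS/_GPE for this PNP0C09 node, then record it */
                ec->handle = handle;
                return AE_CTRL_TERMINATE;       /* stop walking after the first EC */
        }
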
index 9bb0773d39bfd14f87f0e8dbfe5a1ed31b54162d..27cc7feabfe4322dfa578c2e9af8464e4f4feaf7 100644 (file)
@@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);
 
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 void acpi_ec_unblock_transactions_early(void);
index 53ddba26578ce341eb3f488001a9cc267342eeda..98efbfcdb503e181d32f44a0dd0b44f286e6080a 100644 (file)
@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
 config COMMON_CLK_NXP
        def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
        select REGMAP_MMIO if ARCH_LPC32XX
+       select MFD_SYSCON if ARCH_LPC18XX
        ---help---
          Support for clock providers on NXP platforms.
 
index 020a29acc5b079e7c6560745804a6fa91e7d9273..51f54380474b6acbe4f39a6ea1a54c19cb4121f1 100644 (file)
@@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)
 
        /* register fixed rate clocks */
        clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
-                                               CLK_IS_ROOT, 24000000);
+                                               0, 24000000);
        clks[FRCCLK] =  clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
-                                               CLK_IS_ROOT, 8000000);
+                                               0, 8000000);
        clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
-                                               CLK_IS_ROOT, 8000000);
+                                               0, 8000000);
        clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
-                                               CLK_IS_ROOT, 32000);
+                                               0, 32000);
        clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
-                                               CLK_IS_ROOT, 24000000);
+                                               0, 24000000);
        /* fixed rate (optional) clock */
        if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
                pr_info("pic32-clk: dt requests SOSC.\n");
index 0d159b513469111572c615a2bda79fce5b8ba150..ee367e9b7d2e94c5a1a04781b3cc2e41b5616b2f 100644 (file)
@@ -1460,6 +1460,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
        intel_pstate_clear_update_util_hook(policy->cpu);
 
+       pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+                policy->cpuinfo.max_freq, policy->max);
+
        cpu = all_cpu_data[0];
        if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
            policy->max < policy->cpuinfo.max_freq &&
@@ -1495,13 +1498,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                                   limits->max_sysfs_pct);
        limits->max_perf_pct = max(limits->min_policy_pct,
                                   limits->max_perf_pct);
-       limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
        limits->min_perf = div_fp(limits->min_perf_pct, 100);
        limits->max_perf = div_fp(limits->max_perf_pct, 100);
+       limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
  out:
        intel_pstate_set_update_util_hook(policy->cpu);
@@ -1558,8 +1561,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
-       policy->cpuinfo.max_freq =
-               cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       update_turbo_state();
+       policy->cpuinfo.max_freq = limits->turbo_disabled ?
+                       cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+       policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
        intel_pstate_init_acpi_perf_limits(policy);
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);
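Annotation: with update_turbo_state() folded in, cpuinfo.max_freq now tracks whether turbo is currently disabled. With illustrative numbers, not from any particular SKU: scaling = 100000 kHz per P-state step, max_pstate = 24 and turbo_pstate = 32 give cpuinfo.max_freq = 24 * 100000 = 2400000 kHz when turbo_disabled is set, and 32 * 100000 = 3200000 kHz otherwise.
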
index 6aa256b0a1ed34c2e1a4dff091c25dce5309fb34..c3ee3ad98a63834e055b0218465d3033ff127a1c 100644 (file)
@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);
 
-               edac_mod_work(&mci->work, value);
+               if (mci->op_state == OP_RUNNING_POLL)
+                       edac_mod_work(&mci->work, value);
        }
        mutex_unlock(&mem_ctls_mutex);
 }
index b4d0bf6534cf43732df678eafd956fe39209ce03..6744d88bdea89782c524ede7d843c3d6bcd67ee2 100644 (file)
@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
        { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 };
 
-#define RIR_RNK_TGT(reg)               GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg)                GET_BITFIELD(reg,  2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+       GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+       GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
 
 /* Device 16, functions 2-7 */
 
@@ -326,6 +329,7 @@ struct pci_id_descr {
 struct pci_id_table {
        const struct pci_id_descr       *descr;
        int                             n_devs;
+       enum type                       type;
 };
 
 struct sbridge_dev {
@@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0)          },
 };
 
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) {     \
+       .descr = A,                     \
+       .n_devs = ARRAY_SIZE(A),        \
+       .type = T                       \
+}
+
 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_knl_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
        {0,}
 };
 
@@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
                                pci_read_config_dword(pvt->pci_tad[i],
                                                      rir_offset[j][k],
                                                      &reg);
-                               tmp_mb = RIR_OFFSET(reg) << 6;
+                               tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
 
                                gb = div_u64_rem(tmp_mb, 1024, &mb);
                                edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
                                         i, j, k,
                                         gb, (mb*1000)/1024,
                                         ((u64)tmp_mb) << 20L,
-                                        (u32)RIR_RNK_TGT(reg),
+                                        (u32)RIR_RNK_TGT(pvt->info.type, reg),
                                         reg);
                        }
                }
@@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
                              rir_offset[n_rir][idx],
                              &reg);
-       *rank = RIR_RNK_TGT(reg);
+       *rank = RIR_RNK_TGT(pvt->info.type, reg);
 
        edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
                 n_rir,
@@ -3357,12 +3366,12 @@ fail0:
 #define ICPU(model, table) \
        { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
 
-/* Order here must match "enum type" */
 static const struct x86_cpu_id sbridge_cpuids[] = {
        ICPU(0x2d, pci_dev_descr_sbridge_table),        /* SANDY_BRIDGE */
        ICPU(0x3e, pci_dev_descr_ibridge_table),        /* IVY_BRIDGE */
        ICPU(0x3f, pci_dev_descr_haswell_table),        /* HASWELL */
        ICPU(0x4f, pci_dev_descr_broadwell_table),      /* BROADWELL */
+       ICPU(0x56, pci_dev_descr_broadwell_table),      /* BROADWELL-DE */
        ICPU(0x57, pci_dev_descr_knl_table),            /* KNIGHTS_LANDING */
        { }
 };
@@ -3398,7 +3407,7 @@ static int sbridge_probe(const struct x86_cpu_id *id)
                         mc, mc + 1, num_mc);
 
                sbridge_dev->mc = mc++;
-               rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
+               rc = sbridge_register_mci(sbridge_dev, ptable->type);
                if (unlikely(rc < 0))
                        goto fail1;
        }
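Annotation: previously the enum type was recovered positionally as id - sbridge_cpuids, which forced sbridge_cpuids[] and enum type to stay in lockstep; the new BROADWELL-DE entry (0x56) reuses the Broadwell PCI table and would have broken that mapping, so the type now rides in pci_id_table itself. A sketch, assuming ptable is recovered from id->driver_data as the ICPU() macro suggests:

        const struct pci_id_table *ptable =
                (const struct pci_id_table *)id->driver_data;

        rc = sbridge_register_mci(sbridge_dev, ptable->type);   /* was: id - sbridge_cpuids */
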
index a850cbc48d8d17a3de7ba51cb7ea033e57ed4fcd..c49d50e68aeebb0dbe368f8e8f526b8c18bdebb0 100644 (file)
@@ -174,6 +174,7 @@ static __init void reserve_regions(void)
 {
        efi_memory_desc_t *md;
        u64 paddr, npages, size;
+       int resv;
 
        if (efi_enabled(EFI_DBG))
                pr_info("Processing EFI memory map:\n");
@@ -190,12 +191,14 @@ static __init void reserve_regions(void)
                paddr = md->phys_addr;
                npages = md->num_pages;
 
+               resv = is_reserve_region(md);
                if (efi_enabled(EFI_DBG)) {
                        char buf[64];
 
-                       pr_info("  0x%012llx-0x%012llx %s",
+                       pr_info("  0x%012llx-0x%012llx %s%s\n",
                                paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
-                               efi_md_typeattr_format(buf, sizeof(buf), md));
+                               efi_md_typeattr_format(buf, sizeof(buf), md),
+                               resv ? "*" : "");
                }
 
                memrange_efi_to_native(&paddr, &npages);
@@ -204,14 +207,9 @@ static __init void reserve_regions(void)
                if (is_normal_ram(md))
                        early_init_dt_add_memory_arch(paddr, size);
 
-               if (is_reserve_region(md)) {
+               if (resv)
                        memblock_mark_nomap(paddr, size);
-                       if (efi_enabled(EFI_DBG))
-                               pr_cont("*");
-               }
 
-               if (efi_enabled(EFI_DBG))
-                       pr_cont("\n");
        }
 
        set_bit(EFI_MEMMAP, &efi.flags);
index 992f00b65be45a2b9ca8f16058aaed076918ae2e..01c36b8d6222bfa64ae996ae8b5d565d23f9ed2a 100644 (file)
@@ -799,6 +799,7 @@ struct amdgpu_ring {
        unsigned                cond_exe_offs;
        u64                             cond_exe_gpu_addr;
        volatile u32    *cond_exe_cpu_addr;
+       int                     vmid;
 };
 
 /*
@@ -936,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
                    unsigned vm_id, uint64_t pd_addr,
                    uint32_t gds_base, uint32_t gds_size,
                    uint32_t gws_base, uint32_t gws_size,
-                   uint32_t oa_base, uint32_t oa_size);
+                   uint32_t oa_base, uint32_t oa_size,
+                   bool vmid_switch);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
index 199f76baf22c3eea20207fec60c1e868d173d1cb..8943099eb13550bf39688ed175ab24263f5436f3 100644 (file)
@@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
        return result;
 }
 
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+       CGS_FUNC_ADEV;
+       if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+               release_firmware(adev->pm.fw);
+               return 0;
+       }
+       /* cannot release other firmware images because they are not created by cgs */
+       return -EINVAL;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                        enum cgs_ucode_id type,
                                        struct cgs_firmware_info *info)
@@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
        amdgpu_cgs_pm_query_clock_limits,
        amdgpu_cgs_set_camera_voltages,
        amdgpu_cgs_get_firmware_info,
+       amdgpu_cgs_rel_firmware,
        amdgpu_cgs_set_powergating_state,
        amdgpu_cgs_set_clockgating_state,
        amdgpu_cgs_get_active_displays_info,
index bb8b149786d708de30a0143bb273f6be8af0390b..964f31404f17a9ddad381ed1bbac3c6fbcd36fe4 100644 (file)
@@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
  */
 static void amdgpu_atombios_fini(struct amdgpu_device *adev)
 {
-       if (adev->mode_info.atom_context)
+       if (adev->mode_info.atom_context) {
                kfree(adev->mode_info.atom_context->scratch);
+               kfree(adev->mode_info.atom_context->iio);
+       }
        kfree(adev->mode_info.atom_context);
        adev->mode_info.atom_context = NULL;
        kfree(adev->mode_info.atom_card_info);
@@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                adev->ip_block_status[i].valid = false;
        }
 
+       for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+               if (adev->ip_blocks[i].funcs->late_fini)
+                       adev->ip_blocks[i].funcs->late_fini((void *)adev);
+       }
+
        return 0;
 }
 
@@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                amdgpu_atombios_has_gpu_virtualization_table(adev);
 
        /* Post card if necessary */
-       if (!amdgpu_card_posted(adev) ||
-           adev->virtualization.supports_sr_iov) {
+       if (!amdgpu_card_posted(adev)) {
                if (!adev->bios) {
                        dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
                        return -EINVAL;
index 34e35423b78e81d3d91102e2acacf889fa9792c0..7a0b1e50f2939fb95084133d5c7df9ed05cf79df 100644 (file)
@@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
+       int vmid = 0, old_vmid = ring->vmid;
        struct fence *hwf;
        uint64_t ctx;
 
@@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (job) {
                vm = job->vm;
                ctx = job->ctx;
+               vmid = job->vm_id;
        } else {
                vm = NULL;
                ctx = 0;
+               vmid = 0;
        }
 
        if (!ring->ready) {
@@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
                                    job->gds_base, job->gds_size,
                                    job->gws_base, job->gws_size,
-                                   job->oa_base, job->oa_size);
+                                   job->oa_base, job->oa_size,
+                                   (ring->current_ctx == ctx) && (old_vmid != vmid));
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
@@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        need_ctx_switch = ring->current_ctx != ctx;
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
-
                /* drop preamble IBs if we don't have a context switch */
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
                        continue;
@@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
                                    need_ctx_switch);
                need_ctx_switch = false;
+               ring->vmid = vmid;
        }
 
        if (ring->funcs->emit_hdp_invalidate)
@@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vm_id)
                        amdgpu_vm_reset_id(adev, job->vm_id);
+               ring->vmid = old_vmid;
                amdgpu_ring_undo(ring);
                return r;
        }
index 6bd961fb43dcc7c7e3562d994e5fc0e79515ed01..82256558e0f59db1b5a3d2af77e3e3d68d3ed2ef 100644 (file)
@@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle)
        if (ret)
                return ret;
 
-#ifdef CONFIG_DRM_AMD_POWERPLAY
-       if (adev->pp_enabled) {
-               amdgpu_pm_sysfs_fini(adev);
-               amd_powerplay_fini(adev->powerplay.pp_handle);
-       }
-#endif
-
        return ret;
 }
 
@@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle)
        return ret;
 }
 
+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->pp_enabled) {
+               amdgpu_pm_sysfs_fini(adev);
+               amd_powerplay_fini(adev->powerplay.pp_handle);
+       }
+
+       if (adev->powerplay.ip_funcs->late_fini)
+               adev->powerplay.ip_funcs->late_fini(
+                         adev->powerplay.pp_handle);
+#endif
+}
+
 static int amdgpu_pp_suspend(void *handle)
 {
        int ret = 0;
@@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
        .sw_fini = amdgpu_pp_sw_fini,
        .hw_init = amdgpu_pp_hw_init,
        .hw_fini = amdgpu_pp_hw_fini,
+       .late_fini = amdgpu_pp_late_fini,
        .suspend = amdgpu_pp_suspend,
        .resume = amdgpu_pp_resume,
        .is_idle = amdgpu_pp_is_idle,
index 3b02272db678e173f2caaffd96527aa332326dc0..870f9494252c83925f4898d286f28891c8e79897 100644 (file)
@@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
        ring->ring = NULL;
        ring->ring_obj = NULL;
 
+       amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
        amdgpu_wb_free(ring->adev, ring->fence_offs);
        amdgpu_wb_free(ring->adev, ring->rptr_offs);
        amdgpu_wb_free(ring->adev, ring->wptr_offs);
index 8bf84efafb049cd693e156a8dbf2dff396c70164..48618ee324ebb54ae3b9eab09c8f179164741128 100644 (file)
@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                return r;
        }
        r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+       memset(sa_manager->cpu_ptr, 0, sa_manager->size);
        amdgpu_bo_unreserve(sa_manager->bo);
        return r;
 }
index 01abfc21b4a2249c43b5b0ce747b36ac111940c2..e19520c4b4b6e3cd6bd4285c008885998e87187f 100644 (file)
@@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->uvd.vcpu_bo == NULL)
-               return 0;
+       kfree(adev->uvd.saved_bo);
 
        amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
 
-       r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-       if (!r) {
-               amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-               amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-               amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-       }
+       if (adev->uvd.vcpu_bo) {
+               r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+               if (!r) {
+                       amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+                       amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+                       amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+               }
 
-       amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+               amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+       }
 
        amdgpu_ring_fini(&adev->uvd.ring);
 
index 9f36ed30ba115b9be28ee3dff2eea75c2fe796f4..62a4c127620f070911fc39a601684bd42d0b519c 100644 (file)
@@ -298,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
                    unsigned vm_id, uint64_t pd_addr,
                    uint32_t gds_base, uint32_t gds_size,
                    uint32_t gws_base, uint32_t gws_size,
-                   uint32_t oa_base, uint32_t oa_size)
+                   uint32_t oa_base, uint32_t oa_size,
+                   bool vmid_switch)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -312,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
        int r;
 
        if (ring->funcs->emit_pipeline_sync && (
-           pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
-                   ring->type == AMDGPU_RING_TYPE_COMPUTE))
+           pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
                amdgpu_ring_emit_pipeline_sync(ring);
 
        if (ring->funcs->emit_vm_flush &&
index ea407db1fbcfaaa54c844e7f41e06eb3a04bb1a2..5ec1f1e9c983664a137a19ce30d10fe0b0b9e868 100644 (file)
@@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle)
        ci_dpm_fini(adev);
        mutex_unlock(&adev->pm.mutex);
 
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
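Annotation: this release_firmware()/NULL pattern recurs in the fiji, iceland and tonga dpm_sw_fini() hunks below and in the *_free_microcode() helpers added in this series; it balances the request_firmware() done at init time. A sketch of the pairing, with an illustrative firmware file name:

        const struct firmware *fw;
        int err;

        err = request_firmware(&fw, "amdgpu/example_smc.bin", adev->dev);
        if (err)
                return err;
        /* ... consume fw->data / fw->size ... */
        release_firmware(fw);   /* NULL-safe, so fini paths may call it unconditionally */
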
 
index 518dca43b133a2eaa638031be93c4af9522c5b81..9dc4e24e31e73c2f98ffe845f649f4a914e24290 100644 (file)
@@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
 
 u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
 
+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               release_firmware(adev->sdma.instance[i].fw);
+               adev->sdma.instance[i].fw = NULL;
+       }
+}
+
 /*
  * sDMA - System DMA
  * Starting with CIK, the GPU has new asynchronous
@@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
+
+       cik_sdma_enable(adev, true);
 
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       /* unhalt the MEs */
-       cik_sdma_enable(adev, true);
+       /* halt the engine before programming */
+       cik_sdma_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(adev);
@@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle)
        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+       cik_sdma_free_microcode(adev);
        return 0;
 }
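Annotation: the same reordering appears in the sdma_v2_4 and sdma_v3_0 hunks below. The engine is now halted while the ring registers are programmed, unhalted once, and only then ring-tested, rather than being unhalted before programming finished. A condensed sketch of the new sequence; for_each_instance() and program_ring_registers() stand in for the loops above:

        cik_sdma_enable(adev, false);           /* halt before touching ring state */
        for_each_instance(i)
                program_ring_registers(adev, i);        /* RPTR/WPTR/IB_RPTR/IB_OFFSET zeroed, ... */
        cik_sdma_enable(adev, true);            /* unhalt once everything is programmed */
        for_each_instance(i)
                r = amdgpu_ring_test_ring(&adev->sdma.instance[i].ring);
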
 
index 245cabf06575c73e1e9427011d878237e2e96fdf..ed03b75175d48b1ba24754f2e12499ac1041bb17 100644 (file)
@@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)
 
 static int fiji_dpm_sw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
index 7f18a53ab53ac8bc18c992d7fdc76d10a283c3d4..8c6ad1e72f02f190aef89342b4bce6b6de1e765e 100644 (file)
@@ -991,6 +991,22 @@ out:
        return err;
 }
 
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+       release_firmware(adev->gfx.pfp_fw);
+       adev->gfx.pfp_fw = NULL;
+       release_firmware(adev->gfx.me_fw);
+       adev->gfx.me_fw = NULL;
+       release_firmware(adev->gfx.ce_fw);
+       adev->gfx.ce_fw = NULL;
+       release_firmware(adev->gfx.mec_fw);
+       adev->gfx.mec_fw = NULL;
+       release_firmware(adev->gfx.mec2_fw);
+       adev->gfx.mec2_fw = NULL;
+       release_firmware(adev->gfx.rlc_fw);
+       adev->gfx.rlc_fw = NULL;
+}
+
 /**
  * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
  *
@@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle)
        gfx_v7_0_cp_compute_fini(adev);
        gfx_v7_0_rlc_fini(adev);
        gfx_v7_0_mec_fini(adev);
+       gfx_v7_0_free_microcode(adev);
 
        return 0;
 }
index f19bab68fd837ffe3cacb54ef04f9d50dc8fee27..9f6f8669edc3ec424cdc156cbce2ac8596e94e1e 100644 (file)
@@ -836,6 +836,26 @@ err1:
        return r;
 }
 
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
+{
+       release_firmware(adev->gfx.pfp_fw);
+       adev->gfx.pfp_fw = NULL;
+       release_firmware(adev->gfx.me_fw);
+       adev->gfx.me_fw = NULL;
+       release_firmware(adev->gfx.ce_fw);
+       adev->gfx.ce_fw = NULL;
+       release_firmware(adev->gfx.rlc_fw);
+       adev->gfx.rlc_fw = NULL;
+       release_firmware(adev->gfx.mec_fw);
+       adev->gfx.mec_fw = NULL;
+       if ((adev->asic_type != CHIP_STONEY) &&
+           (adev->asic_type != CHIP_TOPAZ))
+               release_firmware(adev->gfx.mec2_fw);
+       adev->gfx.mec2_fw = NULL;
+
+       kfree(adev->gfx.rlc.register_list_format);
+}
+
 static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
@@ -1983,7 +2003,7 @@ static int gfx_v8_0_sw_fini(void *handle)
 
        gfx_v8_0_rlc_fini(adev);
 
-       kfree(adev->gfx.rlc.register_list_format);
+       gfx_v8_0_free_microcode(adev);
 
        return 0;
 }
@@ -3974,11 +3994,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
                amdgpu_ring_write(ring, 0x3a00161a);
                amdgpu_ring_write(ring, 0x0000002e);
                break;
-       case CHIP_TOPAZ:
        case CHIP_CARRIZO:
                amdgpu_ring_write(ring, 0x00000002);
                amdgpu_ring_write(ring, 0x00000000);
                break;
+       case CHIP_TOPAZ:
+               amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+                               0x00000000 : 0x00000002);
+               amdgpu_ring_write(ring, 0x00000000);
+               break;
        case CHIP_STONEY:
                amdgpu_ring_write(ring, 0x00000000);
                amdgpu_ring_write(ring, 0x00000000);
index 460bc8ad37e6252a910c068e5114bcc687350faf..825ccd63f2dc313ced6199d476f0b96d8b8cdba6 100644 (file)
@@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)
 
 static int iceland_dpm_sw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
index f4c3130d3fdb20eee732b7c4e6e123ca008906a1..b556bd0a8797edce702d3c26ce78ff331ea82945 100644 (file)
@@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+       int i;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               release_firmware(adev->sdma.instance[i].fw);
+               adev->sdma.instance[i].fw = NULL;
+       }
+}
+
 /**
  * sdma_v2_4_init_microcode - load ucode images from disk
  *
@@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
 
+       sdma_v2_4_enable(adev, true);
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
                        return -EINVAL;
        }
 
-       /* unhalt the MEs */
-       sdma_v2_4_enable(adev, true);
+       /* halt the engine before programming */
+       sdma_v2_4_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v2_4_gfx_resume(adev);
@@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle)
        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+       sdma_v2_4_free_microcode(adev);
        return 0;
 }
 
index 31d99b0010f79a032540b06cf0bc3c04d498f78c..532ea88da66abdff73e21d86debf28867d2d75fb 100644 (file)
@@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+       int i;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               release_firmware(adev->sdma.instance[i].fw);
+               adev->sdma.instance[i].fw = NULL;
+       }
+}
+
 /**
  * sdma_v3_0_init_microcode - load ucode images from disk
  *
@@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
+
+       /* unhalt the MEs */
+       sdma_v3_0_enable(adev, true);
+       /* enable sdma ring preemption */
+       sdma_v3_0_ctx_switch_enable(adev, true);
 
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       /* unhalt the MEs */
-       sdma_v3_0_enable(adev, true);
-       /* enable sdma ring preemption */
-       sdma_v3_0_ctx_switch_enable(adev, true);
+       /* disable the sdma engine before programming it */
+       sdma_v3_0_ctx_switch_enable(adev, false);
+       sdma_v3_0_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v3_0_gfx_resume(adev);
@@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle)
        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+       sdma_v3_0_free_microcode(adev);
        return 0;
 }
 
index b7615cefcac40b5d2607a194b5184ed2fe3a9bcf..f06f6f4dc3a8a93730a38050d269d6df00e3cc1e 100644 (file)
@@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)
 
 static int tonga_dpm_sw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
index 6080951d539d0d3fd4c85390a0afa18ce2df8cd7..afce1edbe2503305005b4d72c8371f7e6bc5dfc8 100644 (file)
@@ -157,6 +157,7 @@ struct amd_ip_funcs {
        int (*hw_init)(void *handle);
        /* tears down the hw state */
        int (*hw_fini)(void *handle);
+       void (*late_fini)(void *handle);
        /* handles IP specific hw/sw changes for suspend */
        int (*suspend)(void *handle);
        /* handles IP specific hw/sw changes for resume */
index a461e155a160240f7f127c156b902b56651c523b..7464daf89ca1319fb64a11646bfa26d98c473e5d 100644 (file)
@@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
                                     enum cgs_ucode_id type,
                                     struct cgs_firmware_info *info);
 
+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+                                        enum cgs_ucode_id type);
+
 typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
                                  enum amd_ip_block_type block_type,
                                  enum amd_powergating_state state);
@@ -645,6 +648,7 @@ struct cgs_ops {
        cgs_set_camera_voltages_t set_camera_voltages;
        /* Firmware Info */
        cgs_get_firmware_info get_firmware_info;
+       cgs_rel_firmware rel_firmware;
        /* cg pg interface*/
        cgs_set_powergating_state set_powergating_state;
        cgs_set_clockgating_state set_clockgating_state;
@@ -738,6 +742,8 @@ struct cgs_device
        CGS_CALL(set_camera_voltages,dev,mask,voltages)
 #define cgs_get_firmware_info(dev, type, info) \
        CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type)    \
+       CGS_CALL(rel_firmware, dev, type)
 #define cgs_set_powergating_state(dev, block_type, state)      \
        CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state)      \
index 8e345bfddb693033d08584d1093f254ef9619a38..e629f8a9fe936be12cb587b921675363ae425cd7 100644 (file)
@@ -73,11 +73,14 @@ static int pp_sw_init(void *handle)
 
        ret = hwmgr->hwmgr_func->backend_init(hwmgr);
        if (ret)
-               goto err;
+               goto err1;
 
        pr_info("amdgpu: powerplay initialized\n");
 
        return 0;
+err1:
+       if (hwmgr->pptable_func->pptable_fini)
+               hwmgr->pptable_func->pptable_fini(hwmgr);
 err:
        pr_err("amdgpu: powerplay initialization failed\n");
        return ret;
@@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle)
        if (hwmgr->hwmgr_func->backend_fini != NULL)
                ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
 
+       if (hwmgr->pptable_func->pptable_fini)
+               hwmgr->pptable_func->pptable_fini(hwmgr);
+
        return ret;
 }
 
index 46410e3c73493acce741f955f60790fc9745256e..fb88e4e5d625815ab213b1011392bce0ef96eae6 100644 (file)
@@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
        pem_unregister_interrupts(eventmgr);
 
        pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
-       if (eventmgr != NULL)
-               kfree(eventmgr);
 }
 
 int eventmgr_init(struct pp_instance *handle)
index 24a16e49b5713d07f8a53aa81fb719ab75383500..586f73276226415b4384482720ce605e85921da2 100644 (file)
@@ -1830,7 +1830,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
 
        PP_ASSERT_WITH_CODE(false,
                        "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-                       return vddci_table->entries[i].value);
+                       return vddci_table->entries[i-1].value);
 }
 
 static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
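Annotation: this fix, and the identical one in phm_find_closest_vddci() below, addresses an off-by-one on the fall-through path: when no table entry is at least the requested VDDCI, the search loop exits with i == vddci_table->count, so entries[i] reads one past the end, while entries[i-1] is the largest valid entry. A sketch of the loop shape being assumed:

        for (i = 0; i < vddci_table->count; i++) {
                if (vddci_table->entries[i].value >= vddci)
                        return vddci_table->entries[i].value;
        }
        /* i == count here: entries[i] is out of bounds, entries[i-1] is the max */
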
index 1c48917da3cf3712b39eb72aaf1511fc3936845c..20f20e0755881ee868bf7995b8600d8e4697e679 100644 (file)
@@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
        if (hwmgr == NULL || hwmgr->ps == NULL)
                return -EINVAL;
 
+       /* free the hwmgr backend and thermal controller function lists */
+       kfree(hwmgr->backend);
+
+       kfree(hwmgr->start_thermal_controller.function_list);
+
+       kfree(hwmgr->set_temperature_range.function_list);
+
        kfree(hwmgr->ps);
        kfree(hwmgr);
        return 0;
@@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
 
        PP_ASSERT_WITH_CODE(false,
                        "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-                       return vddci_table->entries[i].value);
+                       return vddci_table->entries[i-1].value);
 }
 
 int phm_find_boot_level(void *table,
index 0b99ab3ba0c5063ca8d3cd70c91e3bab0c3f5ab1..ae96f14b827cd45261483d18a19cd6de1e3b3420 100644 (file)
@@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
 
                if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
                                (uint8_t *)&data->power_tune_table,
-                               sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
+                               (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to download PmFuseTable Failed!",
                                        return -EINVAL);
index 16fed487973b9bd28396d109ba079086b127379c..d27e8c40602a03cbf434904b104c4fd50ff714bf 100644 (file)
@@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
                }
        }
 
-       /* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
-       for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
-               data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
-               /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
-               /* param1 is for corresponding std voltage */
-               data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
-       }
-       data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
-       if (NULL != allowed_vdd_mclk_table) {
-               /* Initialize Vddci DPM table based on allow Mclk values */
-               for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
-                       data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
-                       data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
-                       data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
-                       data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
-               }
-               data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
-               data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
-       }
-
        /* setup PCIE gen speed levels*/
        tonga_setup_default_pcie_tables(hwmgr);
 
index 10e3630ee39d8d86c4897754274d70b451203328..296ec7ef6d45270c3224c49fd5275dfb18e422ba 100644 (file)
@@ -1040,48 +1040,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
        struct phm_ppt_v1_information *pp_table_information =
                (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
-       if (NULL != hwmgr->soft_pp_table) {
-               kfree(hwmgr->soft_pp_table);
+       if (NULL != hwmgr->soft_pp_table)
                hwmgr->soft_pp_table = NULL;
-       }
 
-       if (NULL != pp_table_information->vdd_dep_on_sclk)
-               pp_table_information->vdd_dep_on_sclk = NULL;
+       kfree(pp_table_information->vdd_dep_on_sclk);
+       pp_table_information->vdd_dep_on_sclk = NULL;
 
-       if (NULL != pp_table_information->vdd_dep_on_mclk)
-               pp_table_information->vdd_dep_on_mclk = NULL;
+       kfree(pp_table_information->vdd_dep_on_mclk);
+       pp_table_information->vdd_dep_on_mclk = NULL;
 
-       if (NULL != pp_table_information->valid_mclk_values)
-               pp_table_information->valid_mclk_values = NULL;
+       kfree(pp_table_information->valid_mclk_values);
+       pp_table_information->valid_mclk_values = NULL;
 
-       if (NULL != pp_table_information->valid_sclk_values)
-               pp_table_information->valid_sclk_values = NULL;
+       kfree(pp_table_information->valid_sclk_values);
+       pp_table_information->valid_sclk_values = NULL;
 
-       if (NULL != pp_table_information->vddc_lookup_table)
-               pp_table_information->vddc_lookup_table = NULL;
+       kfree(pp_table_information->vddc_lookup_table);
+       pp_table_information->vddc_lookup_table = NULL;
 
-       if (NULL != pp_table_information->vddgfx_lookup_table)
-               pp_table_information->vddgfx_lookup_table = NULL;
+       kfree(pp_table_information->vddgfx_lookup_table);
+       pp_table_information->vddgfx_lookup_table = NULL;
 
-       if (NULL != pp_table_information->mm_dep_table)
-               pp_table_information->mm_dep_table = NULL;
+       kfree(pp_table_information->mm_dep_table);
+       pp_table_information->mm_dep_table = NULL;
 
-       if (NULL != pp_table_information->cac_dtp_table)
-               pp_table_information->cac_dtp_table = NULL;
+       kfree(pp_table_information->cac_dtp_table);
+       pp_table_information->cac_dtp_table = NULL;
 
-       if (NULL != hwmgr->dyn_state.cac_dtp_table)
-               hwmgr->dyn_state.cac_dtp_table = NULL;
+       kfree(hwmgr->dyn_state.cac_dtp_table);
+       hwmgr->dyn_state.cac_dtp_table = NULL;
 
-       if (NULL != pp_table_information->ppm_parameter_table)
-               pp_table_information->ppm_parameter_table = NULL;
+       kfree(pp_table_information->ppm_parameter_table);
+       pp_table_information->ppm_parameter_table = NULL;
 
-       if (NULL != pp_table_information->pcie_table)
-               pp_table_information->pcie_table = NULL;
+       kfree(pp_table_information->pcie_table);
+       pp_table_information->pcie_table = NULL;
 
-       if (NULL != hwmgr->pptable) {
-               kfree(hwmgr->pptable);
-               hwmgr->pptable = NULL;
-       }
+       kfree(hwmgr->pptable);
+       hwmgr->pptable = NULL;
 
        return result;
 }
index 673a75c74e18a33c2a42f0ae92bca8e40ee30603..8e52a2e82db58c4de3ffc7ff3903bbabec037f4a 100644 (file)
@@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
 
 static int fiji_smu_fini(struct pp_smumgr *smumgr)
 {
+       struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+       smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
        if (smumgr->backend) {
                kfree(smumgr->backend);
                smumgr->backend = NULL;
        }
+
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
        return 0;
 }
 
index de618ead9db8df73e10ee0da9dac0cad940049b3..043b6ac09d5f6a73c09a4ecf8edf0ceca34f4c7f 100644 (file)
@@ -469,6 +469,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr)
                kfree(smumgr->backend);
                smumgr->backend = NULL;
        }
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
        return 0;
 }
 
index c483baf6b4fbc9bb8b76d293c772f727631c0726..0728c1e3d97a0137baf07466df1964a67f028b8e 100644 (file)
@@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 
 int smum_fini(struct pp_smumgr *smumgr)
 {
+       kfree(smumgr->device);
        kfree(smumgr);
        return 0;
 }
index 32820b680d8886104a1e8c28f7324489e9c4c375..b22722eabafcbc53477dd284a2671326443ec011 100644 (file)
@@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
 
 static int tonga_smu_fini(struct pp_smumgr *smumgr)
 {
+       struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+       smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+       smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
        if (smumgr->backend != NULL) {
                kfree(smumgr->backend);
                smumgr->backend = NULL;
        }
+
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
        return 0;
 }
 
index 0ec1ad961e0dbee6bf50de70cf05f23592241f51..dc723f7ead7d49e0f610bf1c2ae5260243024ee8 100644 (file)
@@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 32,
-       .cache_type = REGCACHE_RBTREE,
+       .cache_type = REGCACHE_FLAT,
 
        .volatile_reg = fsl_dcu_drm_is_volatile_reg,
+       .max_register = 0x11fc,
 };
 
 static int fsl_dcu_drm_irq_init(struct drm_device *dev)
index fbe304ee6c809548bf734be1e9ee7d56a1554392..2aec27dbb5bbbb36f5236e7488ed1677ae67de10 100644 (file)
@@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        }
 
        adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
-       if (!adreno_gpu->memptrs) {
+       if (IS_ERR(adreno_gpu->memptrs)) {
                dev_err(drm->dev, "could not vmap memptrs\n");
                return -ENOMEM;
        }
index d9759bf3482ed54c19692f7ba108075b17529320..c6cf837c51938ee12d9bfb9b6ddf1d049c27fac4 100644 (file)
@@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        dev->mode_config.fb_base = paddr;
 
        fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+       if (IS_ERR(fbi->screen_base)) {
+               ret = PTR_ERR(fbi->screen_base);
+               goto fail_unlock;
+       }
        fbi->screen_size = fbdev->bo->size;
        fbi->fix.smem_start = paddr;
        fbi->fix.smem_len = fbdev->bo->size;
index 7daf4054dd2b78ea634641bf4554fa6cb3d3a444..69836f5685b1ba63b3038ce90da38e9b18d8c6f3 100644 (file)
@@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
                        return ERR_CAST(pages);
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+               if (msm_obj->vaddr == NULL)
+                       return ERR_PTR(-ENOMEM);
        }
        return msm_obj->vaddr;
 }
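Annotation: msm_gem_vaddr_locked() now reports vmap failure with the ERR_PTR convention instead of passing a NULL result through, so the callers patched elsewhere in this series switch from NULL tests to the usual IS_ERR()/PTR_ERR() idiom, as in this sketch of an int-returning caller:

        void *vaddr = msm_gem_vaddr_locked(obj);

        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
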
index b89ca5174863e7ab73c48117605ded1d56626e19..eb4bb8b2f3a5cae6799e1ef142af1be3da568e5d 100644 (file)
@@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
        submit->dev = dev;
        submit->gpu = gpu;
+       submit->fence = NULL;
        submit->pid = get_pid(task_pid(current));
 
        /* initially, until copy_from_user() and bo lookup succeeds: */
        submit->nr_bos = 0;
        submit->nr_cmds = 0;
 
+       INIT_LIST_HEAD(&submit->node);
        INIT_LIST_HEAD(&submit->bo_list);
        ww_acquire_init(&submit->ticket, &reservation_ww_class);
 
@@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                void __user *userptr =
                        u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 
+               /* make sure we don't have garbage flags, in case we hit
+                * the error path before the flags field is initialized:
+                */
+               submit->bos[i].flags = 0;
+
                ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
                if (ret) {
                        ret = -EFAULT;
index b48f73ac63896a087938ae6225836a5d30d1a482..0857710c2ff2f809751bbab313045dfa8acd77cd 100644 (file)
@@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
                struct msm_gem_object *obj = submit->bos[idx].obj;
                const char *buf = msm_gem_vaddr_locked(&obj->base);
 
+               if (IS_ERR(buf))
+                       continue;
+
                buf += iova - submit->bos[idx].iova;
 
                rd_write_section(rd, RD_GPUADDR,
index 1f14b908b22136117eb2b37179d05abfedce2b13..42f5359cf98889cfa837f9ec60eb6d55f1b6e09d 100644 (file)
@@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
        }
 
        ring->start = msm_gem_vaddr_locked(ring->bo);
+       if (IS_ERR(ring->start)) {
+               ret = PTR_ERR(ring->start);
+               goto fail;
+       }
        ring->end   = ring->start + (size / 4);
        ring->cur   = ring->start;
 
index c612dc1f1eb4ef651faa4ec398904a894b0ac814..126a85cc81bc22a763c06bc3dcb204b10793b59c 100644 (file)
@@ -16,9 +16,9 @@ enum nvkm_devidx {
        NVKM_SUBDEV_MC,
        NVKM_SUBDEV_BUS,
        NVKM_SUBDEV_TIMER,
+       NVKM_SUBDEV_INSTMEM,
        NVKM_SUBDEV_FB,
        NVKM_SUBDEV_LTC,
-       NVKM_SUBDEV_INSTMEM,
        NVKM_SUBDEV_MMU,
        NVKM_SUBDEV_BAR,
        NVKM_SUBDEV_PMU,
index db10c11f0595deabf3f3acbcac7299b658a388a8..c5a6ebd5a478691c90c61175adec8cb4df6c4600 100644 (file)
@@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
 
 struct nvbios_ocfg {
-       u16 match;
+       u8  proto;
+       u8  flags;
        u16 clkcmp[2];
 };
 
@@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
 u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
 u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
 #endif
index 57aaf98a26f9c3d5bcfd91e351f29c2e89b7c276..300ea03be8f07aa71b45dbb6acbf599a01cbdccc 100644 (file)
@@ -552,6 +552,7 @@ nouveau_fbcon_init(struct drm_device *dev)
        if (ret)
                goto fini;
 
+       fbcon->helper.fbdev->pixmap.buf_align = 4;
        return 0;
 
 fini:
index 0f3e4bb411cc99c9052beb5b862dd9927888ad7e..7d9248b8c66479b7b72c51a6e856e25428eebb49 100644 (file)
@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        uint32_t fg;
        uint32_t bg;
        uint32_t dsize;
-       uint32_t width;
        uint32_t *data = (uint32_t *)image->data;
        int ret;
 
@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (ret)
                return ret;
 
-       width = ALIGN(image->width, 8);
-       dsize = ALIGN(width * image->height, 32) >> 5;
-
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
                fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
                         ((image->dx + image->width) & 0xffff));
        OUT_RING(chan, bg);
        OUT_RING(chan, fg);
-       OUT_RING(chan, (image->height << 16) | width);
+       OUT_RING(chan, (image->height << 16) | image->width);
        OUT_RING(chan, (image->height << 16) | image->width);
        OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
 
+       dsize = ALIGN(image->width * image->height, 32) >> 5;
        while (dsize) {
                int iter_len = dsize > 128 ? 128 : dsize;
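
For context, the relocated dsize computation rounds the 1-bit-per-pixel glyph data up to a whole number of 32-bit words; a sketch of the arithmetic, using the kernel's ALIGN() macro:

	/* width*height bits, rounded up to a multiple of 32, then / 32 */
	dsize = ALIGN(image->width * image->height, 32) >> 5;
	/* e.g. an 8x16 glyph: ALIGN(128, 32) >> 5 == 4 words */

The fix appears to be that the hardware takes the unpadded width, so sizing the data stream from a width padded to 8 pushed reads past the end of image->data.
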
 
index 33d9ee0fac4084918c002d2164977a53e03ae65a..1aeb698e97079bd2e13c515f610a77155497b3fb 100644 (file)
@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
        struct nouveau_channel *chan = drm->channel;
-       uint32_t width, dwords, *data = (uint32_t *)image->data;
+       uint32_t dwords, *data = (uint32_t *)image->data;
        uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        uint32_t *palette = info->pseudo_palette;
        int ret;
@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (ret)
                return ret;
 
-       width = ALIGN(image->width, 32);
-       dwords = (width * image->height) >> 5;
-
        BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        OUT_RING(chan, 0);
        OUT_RING(chan, image->dy);
 
+       dwords = ALIGN(image->width * image->height, 32) >> 5;
        while (dwords) {
                int push = dwords > 2047 ? 2047 : dwords;
 
index a0913359ac05a02046f9286254b98fa58a650ef4..839f4c8c18056c5bce7dbd8c1511db77784609d8 100644 (file)
@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
        struct nouveau_channel *chan = drm->channel;
-       uint32_t width, dwords, *data = (uint32_t *)image->data;
+       uint32_t dwords, *data = (uint32_t *)image->data;
        uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        uint32_t *palette = info->pseudo_palette;
        int ret;
@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (ret)
                return ret;
 
-       width = ALIGN(image->width, 32);
-       dwords = (width * image->height) >> 5;
-
        BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        OUT_RING  (chan, 0);
        OUT_RING  (chan, image->dy);
 
+       dwords = ALIGN(image->width * image->height, 32) >> 5;
        while (dwords) {
                int push = dwords > 2047 ? 2047 : dwords;
 
index a74c5dd27dc0f8774706bc41f81c7d43169aa339..e2a64ed14b22a06c21d15354f7311027228c6b75 100644 (file)
@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
 nvkm-y += nvkm/engine/disp/sornv50.o
 nvkm-y += nvkm/engine/disp/sorg94.o
 nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/dport.o
 
index f0314664349c3b2c39ef650e1ecdc636bde5aae1..5dd34382f55a311f957153ece26ed5d806b5261f 100644 (file)
@@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
        mask |= 0x0001 << or;
        mask |= 0x0100 << head;
 
+
        list_for_each_entry(outp, &disp->base.outp, head) {
                if ((outp->info.hasht & 0xff) == type &&
                    (outp->info.hashm & mask) == mask) {
@@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
        if (!outp)
                return NULL;
 
+       *conf = (ctrl & 0x00000f00) >> 8;
        switch (outp->info.type) {
        case DCB_OUTPUT_TMDS:
-               *conf = (ctrl & 0x00000f00) >> 8;
                if (*conf == 5)
                        *conf |= 0x0100;
                break;
        case DCB_OUTPUT_LVDS:
-               *conf = disp->sor.lvdsconf;
-               break;
-       case DCB_OUTPUT_DP:
-               *conf = (ctrl & 0x00000f00) >> 8;
+               *conf |= disp->sor.lvdsconf;
                break;
-       case DCB_OUTPUT_ANALOG:
        default:
-               *conf = 0x00ff;
                break;
        }
 
-       data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+       data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+                                &ver, &hdr, &cnt, &len, &info2);
        if (data && id < 0xff) {
                data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
                if (data) {
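
A note on the *conf layout these hunks appear to assume: the low byte carries the protocol taken from the control word and the high byte carries config flags (TMDS conf 5 sets 0x0100; the LVDS config is OR'd in), which is why the match call now splits the value:

	u8 proto = *conf & 0xff;	/* low byte: output protocol */
	u8 flags = *conf >> 8;		/* high byte: config flags */
	data = nvbios_ocfg_match(bios, data, proto, flags,
				 &ver, &hdr, &cnt, &len, &info2);
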
index b6944142d616f91fba4886521994b60ee50a18e0..f4b9cf8574be6c8ce66f51a658790eae552c2322 100644 (file)
@@ -36,7 +36,7 @@ gm107_disp = {
        .outp.internal.crt = nv50_dac_output_new,
        .outp.internal.tmds = nv50_sor_output_new,
        .outp.internal.lvds = nv50_sor_output_new,
-       .outp.internal.dp = gf119_sor_dp_new,
+       .outp.internal.dp = gm107_sor_dp_new,
        .dac.nr = 3,
        .dac.power = nv50_dac_power,
        .dac.sense = nv50_dac_sense,
index 4226d2153b9c2a1f8072697fbb77c1b7838e9e09..fcb1b0c46d64f60c85e8f931f3d091e6694a85cf 100644 (file)
@@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
        if (!outp)
                return NULL;
 
+       *conf = (ctrl & 0x00000f00) >> 8;
        if (outp->info.location == 0) {
                switch (outp->info.type) {
                case DCB_OUTPUT_TMDS:
-                       *conf = (ctrl & 0x00000f00) >> 8;
                        if (*conf == 5)
                                *conf |= 0x0100;
                        break;
                case DCB_OUTPUT_LVDS:
-                       *conf = disp->sor.lvdsconf;
+                       *conf |= disp->sor.lvdsconf;
                        break;
-               case DCB_OUTPUT_DP:
-                       *conf = (ctrl & 0x00000f00) >> 8;
-                       break;
-               case DCB_OUTPUT_ANALOG:
                default:
-                       *conf = 0x00ff;
                        break;
                }
        } else {
@@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
                pclk = pclk / 2;
        }
 
-       data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+       data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+                                &ver, &hdr, &cnt, &len, &info2);
        if (data && id < 0xff) {
                data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
                if (data) {
index e9067ba4e1790d1e70a5cce27035f30234bc8856..4e983f6d7032e897e033085a196405d7c20d9fec 100644 (file)
@@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
 int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
                     struct nvkm_output **);
 int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
 
-int  gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
-                     struct nvkm_output **);
+int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+                    struct nvkm_output **);
+int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
+
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+                    struct nvkm_output **);
 #endif
index b4b41b13564344978517fad5c1a5622430fc1720..22706c0a54b53a9f95a1ad7e453217b7276cfcaf 100644 (file)
@@ -40,8 +40,7 @@ static int
 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
        struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-       const u32 loff = gf119_sor_loff(outp);
-       nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+       nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
        return 0;
 }
 
@@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
        return 0;
 }
 
-static int
+int
 gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
                     int ln, int vs, int pe, int pc)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
new file mode 100644 (file)
index 0000000..37790b2
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+int
+gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+       struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+       const u32 soff = outp->base.or * 0x800;
+       const u32 data = 0x01010101 * pattern;
+       if (outp->base.info.sorconf.link & 1)
+               nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+       else
+               nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+       return 0;
+}
+
+static const struct nvkm_output_dp_func
+gm107_sor_dp_func = {
+       .pattern = gm107_sor_dp_pattern,
+       .lnk_pwr = g94_sor_dp_lnk_pwr,
+       .lnk_ctl = gf119_sor_dp_lnk_ctl,
+       .drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gm107_sor_dp_new(struct nvkm_disp *disp, int index,
+                struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+       return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
+}
index 2cfbef9c344ff68ba9e71805cbbe911aed4ddf5e..c44fa7ea672ab20192c8498e418018492ad93252 100644 (file)
@@ -56,19 +56,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
        return lane * 0x08;
 }
 
-static int
-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
-       struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-       const u32 soff = gm200_sor_soff(outp);
-       const u32 data = 0x01010101 * pattern;
-       if (outp->base.info.sorconf.link & 1)
-               nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
-       else
-               nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
-       return 0;
-}
-
 static int
 gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
@@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 
 static const struct nvkm_output_dp_func
 gm200_sor_dp_func = {
-       .pattern = gm200_sor_dp_pattern,
+       .pattern = gm107_sor_dp_pattern,
        .lnk_pwr = gm200_sor_dp_lnk_pwr,
        .lnk_ctl = gf119_sor_dp_lnk_ctl,
        .drv_ctl = gm200_sor_dp_drv_ctl,
index 9513badb8220e8dfa01f4fa5f129692507f90c01..ae9ab5b1ab979f7b7dcd206e55be4d895855067c 100644 (file)
@@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
 }
 
 static const struct nvkm_enum gf100_mp_warp_error[] = {
-       { 0x00, "NO_ERROR" },
-       { 0x01, "STACK_MISMATCH" },
+       { 0x01, "STACK_ERROR" },
+       { 0x02, "API_STACK_ERROR" },
+       { 0x03, "RET_EMPTY_STACK_ERROR" },
+       { 0x04, "PC_WRAP" },
        { 0x05, "MISALIGNED_PC" },
-       { 0x08, "MISALIGNED_GPR" },
-       { 0x09, "INVALID_OPCODE" },
-       { 0x0d, "GPR_OUT_OF_BOUNDS" },
-       { 0x0e, "MEM_OUT_OF_BOUNDS" },
-       { 0x0f, "UNALIGNED_MEM_ACCESS" },
+       { 0x06, "PC_OVERFLOW" },
+       { 0x07, "MISALIGNED_IMMC_ADDR" },
+       { 0x08, "MISALIGNED_REG" },
+       { 0x09, "ILLEGAL_INSTR_ENCODING" },
+       { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+       { 0x0b, "ILLEGAL_INSTR_PARAM" },
+       { 0x0c, "INVALID_CONST_ADDR" },
+       { 0x0d, "OOR_REG" },
+       { 0x0e, "OOR_ADDR" },
+       { 0x0f, "MISALIGNED_ADDR" },
        { 0x10, "INVALID_ADDR_SPACE" },
-       { 0x11, "INVALID_PARAM" },
+       { 0x11, "ILLEGAL_INSTR_PARAM2" },
+       { 0x12, "INVALID_CONST_ADDR_LDC" },
+       { 0x13, "GEOMETRY_SM_ERROR" },
+       { 0x14, "DIVERGENT" },
+       { 0x15, "WARP_EXIT" },
        {}
 };
 
 static const struct nvkm_bitfield gf100_mp_global_error[] = {
+       { 0x00000001, "SM_TO_SM_FAULT" },
+       { 0x00000002, "L1_ERROR" },
        { 0x00000004, "MULTIPLE_WARP_ERRORS" },
-       { 0x00000008, "OUT_OF_STACK_SPACE" },
+       { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+       { 0x00000010, "BPT_INT" },
+       { 0x00000020, "BPT_PAUSE" },
+       { 0x00000040, "SINGLE_STEP_COMPLETE" },
+       { 0x20000000, "ECC_SEC_ERROR" },
+       { 0x40000000, "ECC_DED_ERROR" },
+       { 0x80000000, "TIMEOUT" },
        {}
 };
 
index a5e92135cd778682cbf3b06889e536aa25d07bb3..9efb1b48cd54d6d93404da7fedab525e4fb75417 100644 (file)
@@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 {
        u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
        if (data) {
-               info->match     = nvbios_rd16(bios, data + 0x00);
+               info->proto     = nvbios_rd08(bios, data + 0x00);
+               info->flags     = nvbios_rd16(bios, data + 0x01);
                info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
                info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
        }
@@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 }
 
 u16
-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
 {
        u16 data, idx = 0;
        while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
-               if (info->match == type)
+               if ((info->proto == proto || info->proto == 0xff) &&
+                   (info->flags == flags))
                        break;
        }
        return data;
index e292f5679418ccfdf23e53e9dbbec7b92b231539..389fb13a19985aa68bf40cfb7c0909fbb4fde3d5 100644 (file)
@@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
 }
 
 static void
-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
+gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
 {
        struct nvkm_subdev *subdev = &ltc->subdev;
        struct nvkm_device *device = subdev->device;
-       u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
+       u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
        u32 stat = nvkm_rd32(device, base + 0x00c);
 
        if (stat) {
@@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
        while (mask) {
                u32 s, c = __ffs(mask);
                for (s = 0; s < ltc->lts_nr; s++)
-                       gm107_ltc_lts_isr(ltc, c, s);
+                       gm107_ltc_intr_lts(ltc, c, s);
                mask &= ~(1 << c);
        }
 }
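
The renamed helper is driven by the standard set-bit walk; a sketch of the idiom (handle() is a hypothetical per-bit callback):

	while (mask) {
		u32 c = __ffs(mask);	/* index of lowest set bit */
		handle(c);		/* visit that bit */
		mask &= ~(1 << c);	/* clear it and continue */
	}
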
index 2a29bfd5125aef5c5f8aa97c7ce0bd91d61dfd94..e18e0dc19ec8b1f988c2ea30ec4e5d274af0b59f 100644 (file)
@@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
 gm200_ltc = {
        .oneinit = gm200_ltc_oneinit,
        .init = gm200_ltc_init,
-       .intr = gm107_ltc_intr, /*XXX: not validated */
+       .intr = gm107_ltc_intr,
        .cbc_clear = gm107_ltc_cbc_clear,
        .cbc_wait = gm107_ltc_cbc_wait,
        .zbc = 16,
index 9ed8272e54ae183240c7560f35f969470acb6e5e..56c43f355ce3c8a8d293dd0fef8bc846837ecb61 100644 (file)
@@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct regulator *vdds_dsi;
-       int r;
 
        if (dsi->vdds_dsi_reg != NULL)
                return 0;
index e129245eb8a9f56c989b342aca9ad25e15abfc8d..9255c0e1e4a7118b3a536241d86aded7fa26d0c4 100644 (file)
@@ -120,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
 
 static int hdmi_init_regulator(void)
 {
-       int r;
        struct regulator *reg;
 
        if (hdmi.vdda_reg != NULL)
index 904d0754ad78928fc4090b1121ae7537edf14147..0f18b76c790628324325a7093b6e26e0282e94d8 100644 (file)
@@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
        WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-       HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-                 vc4_state->mm.start);
-
-       if (debug_dump_regs) {
-               DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
-               vc4_hvs_dump_state(dev);
-       }
-
        if (crtc->state->event) {
                unsigned long flags;
 
@@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
                spin_lock_irqsave(&dev->event_lock, flags);
                vc4_crtc->event = crtc->state->event;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
                crtc->state->event = NULL;
+
+               HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                         vc4_state->mm.start);
+
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       } else {
+               HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                         vc4_state->mm.start);
+       }
+
+       if (debug_dump_regs) {
+               DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+               vc4_hvs_dump_state(dev);
        }
 }
 
@@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
 {
        struct drm_crtc *crtc = &vc4_crtc->base;
        struct drm_device *dev = crtc->dev;
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+       u32 chan = vc4_crtc->channel;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (vc4_crtc->event) {
+       if (vc4_crtc->event &&
+           (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
                drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
                vc4_crtc->event = NULL;
+               drm_crtc_vblank_put(crtc);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
@@ -556,6 +565,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }
 
+       drm_crtc_vblank_put(crtc);
        drm_framebuffer_unreference(flip_state->fb);
        kfree(flip_state);
 
@@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
                return ret;
        }
 
+       WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
        /* Immediately update the plane's legacy fb pointer, so that later
         * modeset prep sees the state that will be present when the semaphore
         * is released.
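
The vc4 changes above follow the drm vblank reference rule: every drm_crtc_vblank_get(), which enables the vblank interrupt and takes a reference, must be balanced by a drm_crtc_vblank_put() once the event has been delivered. A sketch of the pairing, not vc4's exact control flow:

	if (drm_crtc_vblank_get(crtc))		/* ref + enable vblank irq */
		return -EINVAL;			/* simplified error handling */
	/* ...arm the flip; later, from the vblank/flip-done path: */
	drm_crtc_send_vblank_event(crtc, event);
	drm_crtc_vblank_put(crtc);		/* drop the reference */
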
index 3446ece21b4ab4e2d38280ad0b2c5733207ffd20..250ed7e3754c078bdde9907a9da027052cb52922 100644 (file)
@@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = {
 };
 
 static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
-       DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+       DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
                          DRM_ROOT_ONLY),
 };
@@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = {
 
        .enable_vblank = vc4_enable_vblank,
        .disable_vblank = vc4_disable_vblank,
-       .get_vblank_counter = drm_vblank_count,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
 
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = vc4_debugfs_init,
index cb37751bc99fcfe8976ced75f16ce4bd332c2406..861a623bc18536def731d407826cd6061df0643f 100644 (file)
@@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev,
                return -ENOMEM;
 
        /* Make sure that any outstanding modesets have finished. */
-       ret = down_interruptible(&vc4->async_modeset);
-       if (ret) {
-               kfree(c);
-               return ret;
+       if (nonblock) {
+               ret = down_trylock(&vc4->async_modeset);
+               if (ret) {
+                       kfree(c);
+                       return -EBUSY;
+               }
+       } else {
+               ret = down_interruptible(&vc4->async_modeset);
+               if (ret) {
+                       kfree(c);
+                       return ret;
+               }
        }
 
        ret = drm_atomic_helper_prepare_planes(dev, state);
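
The nonblock branch relies on the semaphore API's non-sleeping probe; a sketch of the two acquisition modes on a generic semaphore:

	if (nonblock) {
		if (down_trylock(&sem))		/* nonzero if contended */
			return -EBUSY;		/* never sleeps */
	} else {
		if (down_interruptible(&sem))	/* may sleep; fails on signal */
			return -EINTR;
	}
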
index 6163b95c54111bde8c20bfd8e2efd233cb90b6ae..f99eece4cc97200ba664514dc26941cb6879e77d 100644 (file)
 #define SCALER_DISPLACT0                        0x00000030
 #define SCALER_DISPLACT1                        0x00000034
 #define SCALER_DISPLACT2                        0x00000038
+#define SCALER_DISPLACTX(x)                    (SCALER_DISPLACT0 +     \
+                                                (x) * (SCALER_DISPLACT1 - \
+                                                       SCALER_DISPLACT0))
+
 #define SCALER_DISPCTRL0                        0x00000040
 # define SCALER_DISPCTRLX_ENABLE               BIT(31)
 # define SCALER_DISPCTRLX_RESET                        BIT(30)
index 6de283c8fa3edf0088ca6ee25193802ce64480bf..f0374f9b56cad3a522119f7d4cf8a8940f445bf0 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/frame.h>
 #include <asm/hypervisor.h>
 #include "drmP.h"
 #include "vmwgfx_msg.h"
@@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
 
        return -EINVAL;
 }
-
+STACK_FRAME_NON_STANDARD(vmw_send_msg);
 
 
 /**
@@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
        return 0;
 }
+STACK_FRAME_NON_STANDARD(vmw_recv_msg);
 
 
 /**
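
STACK_FRAME_NON_STANDARD() comes from the newly included <linux/frame.h>: it exempts a function from objtool's stack-frame validation, which these hypercall routines trip because their inline asm clobbers registers in ways the checker cannot model. A hedged sketch of the usage; the function body is illustrative, not vmwgfx's actual backdoor protocol:

	#include <linux/frame.h>

	static unsigned long backdoor_probe(unsigned long cmd)
	{
		unsigned long eax;

		/* port I/O through inline asm defeats frame checking */
		asm volatile("inl %%dx, %%eax"
			     : "=a" (eax)
			     : "d" (0x5658), "a" (cmd)
			     : "memory");
		return eax;
	}
	STACK_FRAME_NON_STANDARD(backdoor_probe);	/* objtool: skip this one */
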
index eb97a9241d171433aa5e4ce60395718fec97af06..15aa49d082c45752fd2751f9229d16bc93196868 100644 (file)
@@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data)
  */
 static int read_registers(struct fam15h_power_data *data)
 {
-       int this_cpu, ret, cpu;
        int core, this_core;
        cpumask_var_t mask;
+       int ret, cpu;
 
        ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
        if (!ret)
@@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data)
        memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
 
        get_online_cpus();
-       this_cpu = smp_processor_id();
 
        /*
         * Choose the first online core of each compute unit, and then
@@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data)
                cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
        }
 
-       if (cpumask_test_cpu(this_cpu, mask))
-               do_read_registers_on_cu(data);
+       on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
 
-       smp_call_function_many(mask, do_read_registers_on_cu, data, true);
        put_online_cpus();
-
        free_cpumask_var(mask);
 
        return 0;
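
The replacement collapses a racy two-step into one call: smp_call_function_many() skips the calling CPU, so the old code had to test for it separately, and sampling smp_processor_id() in preemptible context is itself unreliable. on_each_cpu_mask() runs the function on every CPU in the mask, the local one included:

	/* one call covers remote CPUs and, if set in the mask, this CPU;
	 * the final `true` waits for completion everywhere */
	on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
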
index c9ff08dbe10cefcf8731c1360370edd037307d43..e30a5939dc0d5566ae76ef31e790a578d8b0877c 100644 (file)
@@ -375,7 +375,7 @@ struct lm90_data {
        int kind;
        u32 flags;
 
-       int update_interval;    /* in milliseconds */
+       unsigned int update_interval; /* in milliseconds */
 
        u8 config_orig;         /* Original configuration register value */
        u8 convrate_orig;       /* Original conversion rate register value */
index c2e257d97effb9813ef22b58ab580507dbf4134d..040966775f40aa370df6df5aadbf91babfe76511 100644 (file)
@@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
 {
        int ret = 0;
        struct net_device *old_net_dev;
+       enum ib_gid_type old_gid_type;
 
        /* in rdma_cap_roce_gid_table, this function should be protected by a
         * sleep-able lock.
@@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
        }
 
        old_net_dev = table->data_vec[ix].attr.ndev;
+       old_gid_type = table->data_vec[ix].attr.gid_type;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
@@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }
-       if (default_gid)
-               table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+
        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
+       if (default_gid) {
+               table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+               if (action == GID_TABLE_WRITE_ACTION_DEL)
+                       table->data_vec[ix].attr.gid_type = old_gid_type;
+       }
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);
index 1d92e091e22ef0bdb64ee7433eb2cf5fbba9600c..c99525512b3434d24a7882d4f869f6e675da44b2 100644 (file)
@@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
 
        /* Check if the device started its remove_one */
-       spin_lock_irq(&cm.lock);
+       spin_lock_irqsave(&cm.lock, flags);
        if (!cm_dev->going_down) {
                queue_delayed_work(cm.wq, &work->work, 0);
        } else {
                kfree(work);
                ret = -ENODEV;
        }
-       spin_unlock_irq(&cm.lock);
+       spin_unlock_irqrestore(&cm.lock, flags);
 
 out:
        return ret;
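
The switch to the irqsave variants matters because spin_unlock_irq() unconditionally re-enables interrupts: if a caller reaches cm_establish() with interrupts already disabled, that corrupts its state. The save/restore pair is safe from any context:

	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);	/* records current IRQ state */
	/* ...critical section... */
	spin_unlock_irqrestore(&cm.lock, flags);/* restores it exactly */
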
index 5516fb0703442cafc0c917d3115b8b25e7c71b27..5c155fa91eec8380a463ad687a6aa977c10bcefd 100644 (file)
@@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device,
        if (err || port_attr->subnet_prefix)
                return err;
 
+       if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+               return 0;
+
        err = ib_query_gid(device, port_num, 0, &gid, NULL);
        if (err)
                return err;
@@ -1024,7 +1027,8 @@ static int __init ib_core_init(void)
                goto err_mad;
        }
 
-       if (ib_add_ibnl_clients()) {
+       ret = ib_add_ibnl_clients();
+       if (ret) {
                pr_warn("Couldn't register ibnl clients\n");
                goto err_sa;
        }
index 43e3fa27102b8cd40fb39dc6e4747deed82c0f75..1c41b95cefec0a9e84a7cb151c2765c4d016fd9a 100644 (file)
@@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
        if (!nlmsg_request) {
                pr_info("%s: Could not find a matching request (seq = %u)\n",
                                 __func__, msg_seq);
-                       return -EINVAL;
+               return -EINVAL;
        }
        pm_msg = nlmsg_request->req_buffer;
        local_sockaddr = (struct sockaddr_storage *)
index 82fb511112da745e3fdbf30fe35bcebfceee7491..2d49228f28b2b18ac537b1bb982127cf66138356 100644 (file)
@@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
                /* Now, check to see if there are any methods still in use */
                if (!check_method_table(method)) {
                        /* If not, release management method table */
-                        kfree(method);
-                        class->method_table[mgmt_class] = NULL;
-                        /* Any management classes left ? */
+                       kfree(method);
+                       class->method_table[mgmt_class] = NULL;
+                       /* Any management classes left? */
                        if (!check_class_table(class)) {
                                /* If not, release management class table */
                                kfree(class);
index 5e573bb18660d68f9e5c051ce3c7859ce08dea03..a5793c8f15901483e24e24393173ea8dbee6b191 100644 (file)
@@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
 static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                           u8 port_num)
 {
-       struct attribute_group *hsag = NULL;
+       struct attribute_group *hsag;
        struct rdma_hw_stats *stats;
-       int i = 0, ret;
+       int i, ret;
 
        stats = device->alloc_hw_stats(device, port_num);
 
@@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                return;
 
        if (!stats->names || stats->num_counters <= 0)
-               goto err;
+               goto err_free_stats;
 
+       /*
+        * Two extra attribute elements here, one for the lifespan entry and
+        * one to NULL terminate the list for the sysfs core code
+        */
        hsag = kzalloc(sizeof(*hsag) +
-                      // 1 extra for the lifespan config entry
-                      sizeof(void *) * (stats->num_counters + 1),
+                      sizeof(void *) * (stats->num_counters + 2),
                       GFP_KERNEL);
        if (!hsag)
-               return;
+               goto err_free_stats;
 
        ret = device->get_hw_stats(device, stats, port_num,
                                   stats->num_counters);
        if (ret != stats->num_counters)
-               goto err;
+               goto err_free_hsag;
 
        stats->timestamp = jiffies;
 
@@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
                if (!hsag->attrs[i])
                        goto err;
+               sysfs_attr_init(hsag->attrs[i]);
        }
 
        /* treat an error here as non-fatal */
        hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
+       if (hsag->attrs[i])
+               sysfs_attr_init(hsag->attrs[i]);
 
        if (port) {
                struct kobject *kobj = &port->kobj;
@@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
        return;
 
 err:
-       kfree(stats);
        for (; i >= 0; i--)
                kfree(hsag->attrs[i]);
+err_free_hsag:
        kfree(hsag);
+err_free_stats:
+       kfree(stats);
        return;
 }
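
The relabelled exits follow the usual kernel unwind idiom: one label per resource, placed so that jumping to it frees everything allocated before the failure point, in reverse order. A generic sketch with illustrative names:

	a = alloc_a();
	if (!a)
		return;
	b = alloc_b(a);
	if (!b)
		goto err_free_a;
	if (setup(a, b))
		goto err_free_b;
	return;

err_free_b:
	kfree(b);
err_free_a:
	kfree(a);
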
 
index 6e7050ab9e16049039406b8cbb207cdfced18109..14d7eeb09be6545f5f21144f0f11ea41c53203a4 100644 (file)
@@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
        const struct cpumask *node_mask,
                *proc_mask = tsk_cpus_allowed(current);
        struct cpu_mask_set *set = &dd->affinity->proc;
-       char buf[1024];
 
        /*
         * check whether process/context affinity has already
         * been set
         */
        if (cpumask_weight(proc_mask) == 1) {
-               scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
-               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
-                         current->pid, current->comm, buf);
+               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+                         current->pid, current->comm,
+                         cpumask_pr_args(proc_mask));
                /*
                 * Mark the pre-set CPU as used. This is atomic so we don't
                 * need the lock
@@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
                cpumask_set_cpu(cpu, &set->used);
                goto done;
        } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
-               scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
-               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
-                         current->pid, current->comm, buf);
+               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+                         current->pid, current->comm,
+                         cpumask_pr_args(proc_mask));
                goto done;
        }
 
@@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
        cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
                                  &dd->affinity->rcv_intr.mask :
                                  &dd->affinity->rcv_intr.used));
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
-       hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
+       hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
+                 cpumask_pr_args(intrs));
 
        /*
         * If we don't have a NUMA node requested, preference is towards
@@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
        if (node == -1)
                node = dd->node;
        node_mask = cpumask_of_node(node);
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
-       hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
+       hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+                 cpumask_pr_args(node_mask));
 
        /* diff will hold all unused cpus */
        cpumask_andnot(diff, &set->mask, &set->used);
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
-       hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
+       hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
 
        /* get cpumask of available CPUs on preferred NUMA */
        cpumask_and(mask, diff, node_mask);
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
-       hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
+       hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
 
        /*
         * At first, we don't want to place processes on the same
@@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
                cpumask_andnot(diff, &set->mask, &set->used);
                cpumask_andnot(mask, diff, node_mask);
        }
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
-       hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
+       hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
+                 cpumask_pr_args(mask));
 
        cpu = cpumask_first(mask);
        if (cpu >= nr_cpu_ids) /* empty */
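
All of these conversions lean on the printk bitmap extension: "%*pbl" consumes a (width, pointer) pair and prints the bitmap as a cpu-list, and cpumask_pr_args() expands to exactly that pair, so the 1 KiB stack buffer becomes unnecessary:

	/* cpumask_pr_args(m) expands to: nr_cpu_ids, cpumask_bits(m) */
	hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
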
index 3b876da745a1dc60d2b589b5fbe756cdc33419aa..81619fbb5842dc8fdd4b0b344f5333b711d7e77e 100644 (file)
@@ -7832,8 +7832,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
                         * save first 2 flits in the packet that caused
                         * the error
                         */
-                        dd->err_info_rcvport.packet_flit1 = hdr0;
-                        dd->err_info_rcvport.packet_flit2 = hdr1;
+                       dd->err_info_rcvport.packet_flit1 = hdr0;
+                       dd->err_info_rcvport.packet_flit2 = hdr1;
                }
                switch (info) {
                case 1:
@@ -11906,7 +11906,7 @@ static void update_synth_timer(unsigned long opaque)
                hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
        }
 
-mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+       mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
 }
 
 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
index 5cc492e5776d8d87f3833136ed6d33b05f700da0..0d28a5a40fae5c2b95daf32168b77e1e097be328 100644 (file)
@@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
                dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
                                  (void *)dd->rcvhdrtail_dummy_kvaddr,
                                  dd->rcvhdrtail_dummy_physaddr);
-                                 dd->rcvhdrtail_dummy_kvaddr = NULL;
+               dd->rcvhdrtail_dummy_kvaddr = NULL;
        }
 
        for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
index 79b2952c0dfb31a50f0f08e4a74d3f60b754e861..4cfb13771897ca35b91e476c17afabed48a9e5ce 100644 (file)
@@ -214,19 +214,6 @@ const char *print_u32_array(
        return ret;
 }
 
-const char *print_u64_array(
-       struct trace_seq *p,
-       u64 *arr, int len)
-{
-       int i;
-       const char *ret = trace_seq_buffer_ptr(p);
-
-       for (i = 0; i < len; i++)
-               trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
-       trace_seq_putc(p, 0);
-       return ret;
-}
-
 __hfi1_trace_fn(PKT);
 __hfi1_trace_fn(PROC);
 __hfi1_trace_fn(SDMA);
index 29f4795f866cd19185c680d28f95302e206f62e4..47ffd273ecbd7745d25067f4f8268301d89dde25 100644 (file)
@@ -183,7 +183,7 @@ struct user_sdma_iovec {
        struct sdma_mmu_node *node;
 };
 
-#define SDMA_CACHE_NODE_EVICT BIT(0)
+#define SDMA_CACHE_NODE_EVICT 0
 
 struct sdma_mmu_node {
        struct mmu_rb_node rb;
@@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req,
                 */
                SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
                         req->tidoffset, req->tidoffset / req->omfactor,
-                        !!(req->omfactor - KDETH_OM_SMALL));
+                        req->omfactor != KDETH_OM_SMALL);
                KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
                          req->tidoffset / req->omfactor);
                KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
-                         !!(req->omfactor - KDETH_OM_SMALL));
+                         req->omfactor != KDETH_OM_SMALL);
        }
 done:
        trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
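
The define change above fixes a bit-number/bit-mask mixup: set_bit() and test_bit() take a bit index, and BIT(0) evaluates to 1, so the old define silently operated on bit 1 instead of bit 0. A sketch (evict() is illustrative):

	#define SDMA_CACHE_NODE_EVICT 0			/* a bit number, not a mask */

	set_bit(SDMA_CACHE_NODE_EVICT, &node->flags);	/* sets bit 0 */
	if (test_bit(SDMA_CACHE_NODE_EVICT, &node->flags))
		evict(node);
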
index b01ef6eee6e838b550758c8e4865bab7c1fceb09..0eb09e10454215a1880fe796f38aeb198e528003 100644 (file)
@@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
                else
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
-       if (dev->steering_support ==  MLX4_STEERING_MODE_DEVICE_MANAGED)
-               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
        }
+       if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
 
        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
 
index dabcc65bd65e3b6a93adb47b9b53bbd67d08e15c..9c0e67bd2ba7326cf711e3eba0cf2e6bbd630d3e 100644 (file)
@@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
        int eqn;
        int err;
 
-       if (entries < 0)
+       if (entries < 0 ||
+           (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
                return ERR_PTR(-EINVAL);
 
        if (check_cq_create_flags(attr->flags))
@@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                return -ENOSYS;
        }
 
-       if (entries < 1)
+       if (entries < 1 ||
+           entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+               mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+                            entries,
+                            1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
                return -EINVAL;
+       }
 
        entries = roundup_pow_of_two(entries + 1);
-       if (entries >  (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+       if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
                return -EINVAL;
 
        if (entries == ibcq->cqe + 1)
index c72797cd9e4f199302ededac309b133d4ddd1b27..b48ad85315dc671da59f0453e8d1393a3c3d1a43 100644 (file)
@@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
            MLX5_CAP_ETH(dev->mdev, scatter_fcs))
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
 
+       if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;
 
@@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
-       resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+       if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+               resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
        resp.cache_line_size = L1_CACHE_BYTES;
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
        resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
@@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        if (field_avail(typeof(resp), cqe_version, udata->outlen))
                resp.response_length += sizeof(resp.cqe_version);
 
-       if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+       /*
+        * We don't want to expose information from the PCI bar that is located
+        * after 4096 bytes, so if the arch only supports larger pages, let's
+        * pretend we don't support reading the HCA's core clock. This is also
+        * forced by mmap function.
+        */
+       if (PAGE_SIZE <= 4096 &&
+           field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
                resp.comp_mask |=
                        MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
                resp.hca_core_clock_offset =
@@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
 {
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-       return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+       return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
                       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
@@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
                break;
 
        case MLX5_DEV_EVENT_PORT_DOWN:
+       case MLX5_DEV_EVENT_PORT_INITIALIZED:
                ibev.event = IB_EVENT_PORT_ERR;
                port = (u8)param;
                break;
 
-       case MLX5_DEV_EVENT_PORT_INITIALIZED:
-               /* not used by ULPs */
-               return;
-
        case MLX5_DEV_EVENT_LID_CHANGE:
                ibev.event = IB_EVENT_LID_CHANGE;
                port = (u8)param;
index 504117657d41ffccd9d65467b748bede40fb1054..ce434228a5ead2a90a6375a78203a7591b5f5ae1 100644 (file)
@@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                qp->rq.max_gs = 0;
                qp->rq.wqe_cnt = 0;
                qp->rq.wqe_shift = 0;
+               cap->max_recv_wr = 0;
+               cap->max_recv_sge = 0;
        } else {
                if (ucmd) {
                        qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                         const struct ib_ah_attr *ah,
                         struct mlx5_qp_path *path, u8 port, int attr_mask,
-                        u32 path_flags, const struct ib_qp_attr *attr)
+                        u32 path_flags, const struct ib_qp_attr *attr,
+                        bool alt)
 {
        enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
        int err;
 
        if (attr_mask & IB_QP_PKEY_INDEX)
-               path->pkey_index = attr->pkey_index;
+               path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+                                                    attr->pkey_index);
 
        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >=
@@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                                          ah->grh.sgid_index);
                path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
        } else {
-               path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
-               path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
-                                                                       0;
+               path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+               path->fl_free_ar |=
+                       (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
                path->rlid = cpu_to_be16(ah->dlid);
                path->grh_mlid = ah->src_path_bits & 0x7f;
                if (ah->ah_flags & IB_AH_GRH)
@@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        path->port = port;
 
        if (attr_mask & IB_QP_TIMEOUT)
-               path->ackto_lt = attr->timeout << 3;
+               path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
 
        if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
                return modify_raw_packet_eth_prio(dev->mdev,
@@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
 
        if (attr_mask & IB_QP_PKEY_INDEX)
-               context->pri_path.pkey_index = attr->pkey_index;
+               context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
 
        /* todo implement counter_index functionality */
 
@@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (attr_mask & IB_QP_AV) {
                err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
                                    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
-                                   attr_mask, 0, attr);
+                                   attr_mask, 0, attr, false);
                if (err)
                        goto out;
        }
@@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (attr_mask & IB_QP_ALT_PATH) {
                err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
                                    &context->alt_path,
-                                   attr->alt_port_num, attr_mask, 0, attr);
+                                   attr->alt_port_num,
+                                   attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+                                   0, attr, true);
                if (err)
                        goto out;
        }
@@ -4013,11 +4019,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
                to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
-               qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
+               qp_attr->alt_pkey_index =
+                       be16_to_cpu(context->alt_path.pkey_index);
                qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
        }
 
-       qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
+       qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
        qp_attr->port_num = context->pri_path.port;
 
        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
@@ -4079,17 +4086,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
 
        if (!ibqp->uobject) {
-               qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
+               qp_attr->cap.max_send_wr  = qp->sq.max_post;
                qp_attr->cap.max_send_sge = qp->sq.max_gs;
+               qp_init_attr->qp_context = ibqp->qp_context;
        } else {
                qp_attr->cap.max_send_wr  = 0;
                qp_attr->cap.max_send_sge = 0;
        }
 
-       /* We don't support inline sends for kernel QPs (yet), and we
-        * don't know what userspace's value should be.
-        */
-       qp_attr->cap.max_inline_data = 0;
+       qp_init_attr->qp_type = ibqp->qp_type;
+       qp_init_attr->recv_cq = ibqp->recv_cq;
+       qp_init_attr->send_cq = ibqp->send_cq;
+       qp_init_attr->srq = ibqp->srq;
+       qp_attr->cap.max_inline_data = qp->max_inline_data;
 
        qp_init_attr->cap            = qp_attr->cap;
 
index 7209fbc03ccb90a7360f403fe99303b58c7932ee..a0b6ebee4d8a047e2fdffb8bbd831e5c36107fec 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
 #include <linux/iommu.h>
 #include <linux/workqueue.h>
 #include <linux/list.h>
@@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
        int i;
        int flags;
        dma_addr_t pa;
-       DEFINE_DMA_ATTRS(attrs);
-
-       if (dmasync)
-               dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
        if (!can_do_mlock())
                return -EPERM;
index 5fa4d4d81ee0cfcc5389897c904e343ff4903897..7de5134bec851987b83249247b14cc1a6d49cc4d 100644 (file)
@@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
  */
 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                  enum ib_qp_type type)
+       __releases(&qp->s_lock)
+       __releases(&qp->s_hlock)
+       __releases(&qp->r_lock)
+       __acquires(&qp->r_lock)
+       __acquires(&qp->s_hlock)
+       __acquires(&qp->s_lock)
 {
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
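
The __releases/__acquires lines are sparse context annotations (no-ops for gcc): they document that rvt_reset_qp() is entered with the three locks held, drops them internally, and retakes them before returning, so sparse's lock-balance checking doesn't flag it. A minimal sketch of the pattern on a single lock (ctx and do_work are hypothetical):

	static void drop_lock_and_work(struct ctx *c)
		__releases(&c->lock)
		__acquires(&c->lock)
	{
		spin_unlock(&c->lock);	/* caller holds it on entry */
		do_work(c);
		spin_lock(&c->lock);	/* held again on return */
	}
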
index bab7db6fa9abf8c045b9e1b7e7a55f8996bf3745..4f7d9b48df643c7ef5f69b80f870a0f15c6efe87 100644 (file)
@@ -94,6 +94,7 @@ enum {
        IPOIB_NEIGH_TBL_FLUSH     = 12,
        IPOIB_FLAG_DEV_ADDR_SET   = 13,
        IPOIB_FLAG_DEV_ADDR_CTRL  = 14,
+       IPOIB_FLAG_GOING_DOWN     = 15,
 
        IPOIB_MAX_BACKOFF_SECONDS = 16,
 
index b2f42835d76d51cfed64266d6f884f46de3a11d1..951d9abcca8b283e652368c7c9c96f1fa25f3df4 100644 (file)
@@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 {
        struct net_device *dev = to_net_dev(d);
        int ret;
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+               return -EPERM;
 
        if (!rtnl_trylock())
                return restart_syscall();
index 45c40a17d6a6ffc60cac0cab92bc24c689f75c83..dc6d241b9406e9661394c7e985fda4b0eacbfa6b 100644 (file)
@@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
        if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
                return false;
 
-       netif_addr_lock(priv->dev);
+       netif_addr_lock_bh(priv->dev);
 
        /* The subnet prefix may have changed, update it now so we won't have
         * to do it later
@@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
 
        search_gid.global.interface_id = priv->local_gid.global.interface_id;
 
-       netif_addr_unlock(priv->dev);
+       netif_addr_unlock_bh(priv->dev);
 
        err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
                          priv->dev, &port, &index);
 
-       netif_addr_lock(priv->dev);
+       netif_addr_lock_bh(priv->dev);
 
        if (search_gid.global.interface_id !=
            priv->local_gid.global.interface_id)
@@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
        }
 
 out:
-       netif_addr_unlock(priv->dev);
+       netif_addr_unlock_bh(priv->dev);
 
        return ret;
 }
index 2d7c1634664826a83f1f4efe42b1004c3b23d09f..5f58c41ef787d22692920855f397c1bbcb079e6e 100644 (file)
@@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
                                neigh = NULL;
                                goto out_unlock;
                        }
-                       neigh->alive = jiffies;
+
+                       if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+                               neigh->alive = jiffies;
                        goto out_unlock;
                }
        }
@@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
        struct ipoib_dev_priv *child_priv;
        struct net_device *netdev = priv->dev;
 
-       netif_addr_lock(netdev);
+       netif_addr_lock_bh(netdev);
 
        memcpy(&priv->local_gid.global.interface_id,
               &gid->global.interface_id,
@@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
        memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
        clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
 
-       netif_addr_unlock(netdev);
+       netif_addr_unlock_bh(netdev);
 
        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                down_read(&priv->vlan_rwsem);
@@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
        union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
        int ret = 0;
 
-       netif_addr_lock(dev);
+       netif_addr_lock_bh(dev);
 
        /* Make sure the QPN, reserved and subnet prefix match the current
         * lladdr, it also makes sure the lladdr is unicast.
@@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
            gid->global.interface_id == 0)
                ret = -EINVAL;
 
-       netif_addr_unlock(dev);
+       netif_addr_unlock_bh(dev);
 
        return ret;
 }
@@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
                ib_unregister_event_handler(&priv->event_handler);
                flush_workqueue(ipoib_workqueue);
 
+               /* mark interface in the middle of destruction */
+               set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
                rtnl_lock();
                dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
                rtnl_unlock();
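
The _bh conversions throughout the ipoib hunks address lock recursion from softirq context: netif_addr_lock_bh() disables bottom halves before taking the address lock, so a softirq on the same CPU cannot interrupt the holder and deadlock trying to take the lock again:

	netif_addr_lock_bh(dev);	/* local_bh_disable() + lock */
	/* ...read or update the hardware address... */
	netif_addr_unlock_bh(dev);	/* unlock + local_bh_enable() */
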
index 82fbc9442608f6e10fb0b851a08baf32cfacc5c8..d3394b6add24a0303dd51710d72f86087a36c7fe 100644 (file)
@@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
                return;
        }
        priv->local_lid = port_attr.lid;
-       netif_addr_lock(dev);
+       netif_addr_lock_bh(dev);
 
        if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
-               netif_addr_unlock(dev);
+               netif_addr_unlock_bh(dev);
                return;
        }
-       netif_addr_unlock(dev);
+       netif_addr_unlock_bh(dev);
 
        spin_lock_irq(&priv->lock);
        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
index 64a35595eab83783ade2451f928832548fcb7271..a2f9f29c6ab589e746170ac6463dd6c7781cd20e 100644 (file)
@@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
        ppriv = netdev_priv(pdev);
 
+       if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+               return -EPERM;
+
        snprintf(intf_name, sizeof intf_name, "%s.%04x",
                 ppriv->dev->name, pkey);
        priv = ipoib_intf_alloc(intf_name);
@@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
        ppriv = netdev_priv(pdev);
 
+       if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+               return -EPERM;
+
        if (!rtnl_trylock())
                return restart_syscall();
 
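These two -EPERM checks pair with the set_bit() added in ipoib_remove_one() above: teardown raises IPOIB_FLAG_GOING_DOWN before downing the interface, and any vlan add/delete that starts afterwards observes the bit and backs off instead of racing with destruction. The handshake in isolation (a sketch, not patch text):

	/* teardown side, runs once: */
	set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
	/* ... bring the interface down, flush and free its children ... */

	/* control-path side (ipoib_vlan_add / ipoib_vlan_delete): */
	if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
		return -EPERM;          /* parent is being destroyed */
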
index 646de170ec12e44a9eb20661fa4614e54dd0afe2..3322ed750172ea78f2f57cc1ad08f6f9946eca63 100644 (file)
@@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 {
        unsigned int sg_offset = 0;
 
-       state->desc = req->indirect_desc;
        state->fr.next = req->fr_list;
        state->fr.end = req->fr_list + ch->target->mr_per_cmd;
        state->sg = scat;
@@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
        struct scatterlist *sg;
        int i;
 
-       state->desc = req->indirect_desc;
        for_each_sg(scat, sg, count, i) {
                srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
                             ib_sg_dma_len(dev->dev, sg),
@@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
                                   target->indirect_size, DMA_TO_DEVICE);
 
        memset(&state, 0, sizeof(state));
+       state.desc = req->indirect_desc;
        if (dev->use_fast_reg)
                ret = srp_map_sg_fr(&state, ch, req, scat, count);
        else if (dev->use_fmr)
@@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device)
        int mr_page_shift, p;
        u64 max_pages_per_mr;
 
-       srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+       srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
        if (!srp_dev)
                return;
 
@@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device)
                                                   IB_ACCESS_REMOTE_WRITE);
                if (IS_ERR(srp_dev->global_mr))
                        goto err_pd;
-       } else {
-               srp_dev->global_mr = NULL;
        }
 
        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
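
Both srp changes are one cleanup: initialize once, in one place. srp_map_data() now seeds state.desc for whichever mapping routine runs, and switching to kzalloc() zero-fills srp_dev, so the explicit NULL assignment in the removed else-branch was dead weight. In miniature:

	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); /* every field starts 0/NULL */
	if (!srp_dev)
		return;
	/* no "else srp_dev->global_mr = NULL;" needed anymore */
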
index c5fe915870ad4184dbf1c909f8d8714a2008e9c5..a59d55e25d5f02cbc69d903daaa577e21bafc10f 100644 (file)
@@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
        return rc;
 }
 
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
 {
        struct bnx2x_vlan_entry *vlan;
        int rc = 0;
 
-       if (!bp->vlan_cnt) {
-               DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
-               return 0;
-       }
-
+       /* Configure all non-configured entries */
        list_for_each_entry(vlan, &bp->vlan_reg, link) {
-               /* Prepare for cleanup in case of errors */
-               if (rc) {
-                       vlan->hw = false;
-                       continue;
-               }
-
-               if (!vlan->hw)
+               if (vlan->hw)
                        continue;
 
-               DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+               if (bp->vlan_cnt >= bp->vlan_credit)
+                       return -ENOBUFS;
 
                rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
                if (rc) {
-                       BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
-                       vlan->hw = false;
-                       rc = -EINVAL;
-                       continue;
+                       BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+                       return rc;
                }
+
+               DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+               vlan->hw = true;
+               bp->vlan_cnt++;
        }
 
-       return rc;
+       return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+       bool need_accept_any_vlan;
+
+       need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+       if (bp->accept_any_vlan != need_accept_any_vlan) {
+               bp->accept_any_vlan = need_accept_any_vlan;
+               DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+                  bp->accept_any_vlan ? "raised" : "cleared");
+               if (set_rx_mode) {
+                       if (IS_PF(bp))
+                               bnx2x_set_rx_mode_inner(bp);
+                       else
+                               bnx2x_vfpf_storm_rx_mode(bp);
+               }
+       }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+       struct bnx2x_vlan_entry *vlan;
+
+       /* The HW forgets all entries after a reload */
+       list_for_each_entry(vlan, &bp->vlan_reg, link)
+               vlan->hw = false;
+       bp->vlan_cnt = 0;
+
+       /* Don't set rx mode here. Our caller will do it. */
+       bnx2x_vlan_configure(bp, false);
+
+       return 0;
 }
 
 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct bnx2x *bp = netdev_priv(dev);
        struct bnx2x_vlan_entry *vlan;
-       bool hw = false;
-       int rc = 0;
-
-       if (!netif_running(bp->dev)) {
-               DP(NETIF_MSG_IFUP,
-                  "Ignoring VLAN configuration the interface is down\n");
-               return -EFAULT;
-       }
 
        DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
 
@@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        if (!vlan)
                return -ENOMEM;
 
-       bp->vlan_cnt++;
-       if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
-               DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
-               bp->accept_any_vlan = true;
-               if (IS_PF(bp))
-                       bnx2x_set_rx_mode_inner(bp);
-               else
-                       bnx2x_vfpf_storm_rx_mode(bp);
-       } else if (bp->vlan_cnt <= bp->vlan_credit) {
-               rc = __bnx2x_vlan_configure_vid(bp, vid, true);
-               hw = true;
-       }
-
        vlan->vid = vid;
-       vlan->hw = hw;
+       vlan->hw = false;
+       list_add_tail(&vlan->link, &bp->vlan_reg);
 
-       if (!rc) {
-               list_add(&vlan->link, &bp->vlan_reg);
-       } else {
-               bp->vlan_cnt--;
-               kfree(vlan);
-       }
-
-       DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+       if (netif_running(dev))
+               bnx2x_vlan_configure(bp, true);
 
-       return rc;
+       return 0;
 }
 
 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct bnx2x *bp = netdev_priv(dev);
        struct bnx2x_vlan_entry *vlan;
+       bool found = false;
        int rc = 0;
 
-       if (!netif_running(bp->dev)) {
-               DP(NETIF_MSG_IFUP,
-                  "Ignoring VLAN configuration the interface is down\n");
-               return -EFAULT;
-       }
-
        DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
 
-       if (!bp->vlan_cnt) {
-               BNX2X_ERR("Unable to kill VLAN %d\n", vid);
-               return -EINVAL;
-       }
-
        list_for_each_entry(vlan, &bp->vlan_reg, link)
-               if (vlan->vid == vid)
+               if (vlan->vid == vid) {
+                       found = true;
                        break;
+               }
 
-       if (vlan->vid != vid) {
+       if (!found) {
                BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
                return -EINVAL;
        }
 
-       if (vlan->hw)
+       if (netif_running(dev) && vlan->hw) {
                rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+               DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+               bp->vlan_cnt--;
+       }
 
        list_del(&vlan->link);
        kfree(vlan);
 
-       bp->vlan_cnt--;
-
-       if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
-               /* Configure all non-configured entries */
-               list_for_each_entry(vlan, &bp->vlan_reg, link) {
-                       if (vlan->hw)
-                               continue;
-
-                       rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
-                       if (rc) {
-                               BNX2X_ERR("Unable to config VLAN %d\n",
-                                         vlan->vid);
-                               continue;
-                       }
-                       DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
-                          vlan->vid);
-                       vlan->hw = true;
-               }
-               DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
-               bp->accept_any_vlan = false;
-               if (IS_PF(bp))
-                       bnx2x_set_rx_mode_inner(bp);
-               else
-                       bnx2x_vfpf_storm_rx_mode(bp);
-       }
+       if (netif_running(dev))
+               bnx2x_vlan_configure(bp, true);
 
        DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
 
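The rework above makes the software VLAN list authoritative and treats hardware filters as best effort: bnx2x_vlan_configure_vid_list() programs not-yet-configured entries until the credit runs out, and any shortfall flips accept_any_vlan instead of failing the add. A self-contained model of that policy (names and the credit value are illustrative, not taken from the driver):

	#include <stdbool.h>
	#include <stdio.h>

	#define VLAN_CREDIT 2                   /* illustrative HW filter budget */

	struct vlan_entry { int vid; bool hw; };

	static struct vlan_entry vlans[8];
	static int nvlans, hw_cnt;
	static bool accept_any_vlan;

	/* Same policy as bnx2x_vlan_configure_vid_list(): program entries
	 * that are not yet in hardware until the credit is exhausted. */
	static int configure_vid_list(void)
	{
		for (int i = 0; i < nvlans; i++) {
			if (vlans[i].hw)
				continue;
			if (hw_cnt >= VLAN_CREDIT)
				return -1;      /* out of credit (-ENOBUFS) */
			vlans[i].hw = true;     /* "program the filter" */
			hw_cnt++;
		}
		return 0;
	}

	/* Same policy as bnx2x_vlan_configure(): overflow does not fail the
	 * add, it flips the accept-any-VLAN catch-all in the RX mode. */
	static void vlan_configure(void)
	{
		bool need_any = configure_vid_list() != 0;

		if (accept_any_vlan != need_any) {
			accept_any_vlan = need_any;
			printf("accept_any_vlan -> %d\n", need_any);
		}
	}

	int main(void)
	{
		for (int vid = 100; vid < 104; vid++) {
			vlans[nvlans++] = (struct vlan_entry){ .vid = vid };
			vlan_configure();   /* flips the catch-all at vid 102 */
		}
		return 0;
	}
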
index 72a2efff8e494c7f077b2ee1311bdd54e857ca03..c777cde85ce416d98f3959178c193c0cca4b6c1c 100644 (file)
@@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;
 
+               tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
+               wmb();  /* Sync is_push and byte queue before pushing data */
 
                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
@@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                         push_len);
                }
 
-               tx_buf->is_push = 1;
                goto tx_done;
        }
 
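Setting tx_buf->is_push (and doing the byte-queue accounting) before the doorbell push, with a wmb() in between, closes a race: a pushed packet can complete as soon as the NIC sees it, and the completion path keys off is_push to decide how to clean the buffer up. The required ordering, annotated as a sketch:

	tx_buf->is_push = 1;                  /* (1) publish the flag       */
	netdev_tx_sent_queue(txq, skb->len);  /* (2) byte-queue accounting  */
	wmb();                                /* (1) and (2) must be visible */
	/* (3) ... only now push the descriptor through the doorbell; the
	 *     completion handler may run as soon as the NIC consumes it.  */
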
@@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
 
-       if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
-               netdev_features_t features = skb->dev->features;
+       if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u16 vlan_proto = tpa_info->metadata >>
                        RX_CMP_FLAGS2_METADATA_TPID_SFT;
+               u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
 
-               if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-                    vlan_proto == ETH_P_8021Q) ||
-                   ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-                    vlan_proto == ETH_P_8021AD)) {
-                       __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-                                              tpa_info->metadata &
-                                              RX_CMP_FLAGS2_METADATA_VID_MASK);
-               }
+               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
        }
 
        skb_checksum_none_assert(skb);
@@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 
        skb->protocol = eth_type_trans(skb, dev);
 
-       if (rxcmp1->rx_cmp_flags2 &
-           cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
-               netdev_features_t features = skb->dev->features;
+       if ((rxcmp1->rx_cmp_flags2 &
+            cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+               u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
                u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
 
-               if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-                    vlan_proto == ETH_P_8021Q) ||
-                   ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-                    vlan_proto == ETH_P_8021AD))
-                       __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-                                              meta_data &
-                                              RX_CMP_FLAGS2_METADATA_VID_MASK);
+               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
        }
 
        skb_checksum_none_assert(skb);
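
Both RX paths now pull the TPID and VID out of the 32-bit metadata word up front and no longer distinguish CTAG from STAG, which is safe because the fix_features hunk below forces the two RX acceleration flags on or off together. A standalone model of the extraction, assuming the typical layout (VID in the low bits, TPID above the shift; the constants are stand-ins for the RX_CMP_FLAGS2_METADATA_* definitions):

	#include <stdint.h>
	#include <stdio.h>

	#define METADATA_TPID_SFT 16    /* stand-in for ..._METADATA_TPID_SFT */
	#define METADATA_VID_MASK 0xfff /* stand-in for ..._METADATA_VID_MASK */

	int main(void)
	{
		uint32_t meta_data = (0x8100u << METADATA_TPID_SFT) | 42;
		uint16_t vlan_proto = meta_data >> METADATA_TPID_SFT;
		uint16_t vtag = meta_data & METADATA_VID_MASK;

		printf("proto 0x%04x vid %u\n", vlan_proto, vtag); /* 0x8100, 42 */
		return 0;
	}
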
@@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 
        if (!bnxt_rfs_capable(bp))
                features &= ~NETIF_F_NTUPLE;
+
+       /* Both CTAG and STAG VLAN acceleration on the RX side have to be
+        * turned on or off together.
+        */
+       if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+           (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+               if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+                       features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+                                     NETIF_F_HW_VLAN_STAG_RX);
+               else
+                       features |= NETIF_F_HW_VLAN_CTAG_RX |
+                                   NETIF_F_HW_VLAN_STAG_RX;
+       }
+
        return features;
 }
 
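bnxt_fix_features() keeps the two RX VLAN flags in lockstep: when the requested feature set does not have both bits on, it snaps to both-off if CTAG RX is currently enabled and to both-on otherwise, so toggling either flag moves the pair away from its current state. A runnable restatement (the bit values are stand-ins):

	#include <stdint.h>
	#include <stdio.h>

	#define CTAG_RX (1u << 0)   /* stand-in for NETIF_F_HW_VLAN_CTAG_RX */
	#define STAG_RX (1u << 1)   /* stand-in for NETIF_F_HW_VLAN_STAG_RX */

	static uint32_t fix_vlan_rx(uint32_t current_features, uint32_t requested)
	{
		uint32_t pair = CTAG_RX | STAG_RX;

		if ((requested & pair) != pair) {
			if (current_features & CTAG_RX)
				requested &= ~pair; /* currently on: turn both off */
			else
				requested |= pair;  /* currently off: turn both on */
		}
		return requested;
	}

	int main(void)
	{
		printf("%x\n", (unsigned)fix_vlan_rx(CTAG_RX | STAG_RX, STAG_RX)); /* 0 */
		printf("%x\n", (unsigned)fix_vlan_rx(0, CTAG_RX));                 /* 3 */
		return 0;
	}
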
index a2cdfc1261dc77092049575f5519d7160a1c8f29..50812a1d67bdf8aab1ca8cb5fe560c7c5e2cb778 100644 (file)
@@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
        CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
        CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+       CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
        CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
index 41b01064510098c00a66d86bb49cf2f3fe02f9c3..4edb98c3c6c70a9c620caa7928e8e5d555db1242 100644 (file)
@@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev)
        priv->mdio = mdiobus_alloc();
        if (!priv->mdio) {
                ret = -ENOMEM;
-               goto free;
+               goto free2;
        }
 
        priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev)
        ret = mdiobus_register(priv->mdio);
        if (ret) {
                dev_err(&netdev->dev, "failed to register MDIO bus\n");
-               goto free;
+               goto free2;
        }
 
        ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1241,10 @@ error2:
 error:
        mdiobus_unregister(priv->mdio);
        mdiobus_free(priv->mdio);
-free:
+free2:
        if (priv->clk)
                clk_disable_unprepare(priv->clk);
+free:
        free_netdev(netdev);
 out:
        return ret;
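
The relabelled gotos fix an unwind-ordering bug: the mdio failure sites run after the clock has been enabled, so they must jump to a label that disables the clock before freeing the netdev, while failure sites that run before the clock step now skip clk_disable_unprepare() entirely. The idiom as a runnable toy (all names invented):

	#include <stdio.h>
	#include <stdlib.h>

	/* Each failure jumps to the label that unwinds exactly what exists
	 * so far; fail_stage selects where the probe gives up. */
	static int toy_probe(int fail_stage)
	{
		char *netdev, *clk;

		netdev = malloc(1);                        /* stage 1: netdev    */
		if (!netdev)
			goto out;                          /* nothing to undo    */
		clk = fail_stage == 2 ? NULL : malloc(1);  /* stage 2: clock     */
		if (!clk)
			goto free;                         /* undo stage 1 only  */
		if (fail_stage == 3)                       /* e.g. mdiobus_alloc */
			goto free2;                        /* undo 2, then 1     */

		free(clk);                                 /* success path (toy) */
		free(netdev);
		return 0;

	free2:
		free(clk);
	free:
		free(netdev);
	out:
		return -1;
	}

	int main(void)
	{
		return toy_probe(3) == -1 ? 0 : 1;
	}
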
index 3c0255e98535f3c43948ae0b7e116643e2e39287..fea0f330ddbdeb2a5cf99b81f30e46919d066f62 100644 (file)
@@ -2416,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
                return -EOPNOTSUPP;
 
        if (ec->rx_max_coalesced_frames > 255) {
-               pr_err("Rx coalesced frames exceed hardware limiation");
+               pr_err("Rx coalesced frames exceed hardware limitation\n");
                return -EINVAL;
        }
 
        if (ec->tx_max_coalesced_frames > 255) {
-               pr_err("Tx coalesced frame exceed hardware limiation");
+               pr_err("Tx coalesced frame exceed hardware limitation\n");
                return -EINVAL;
        }
 
        cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
        if (cycle > 0xFFFF) {
-               pr_err("Rx coalesed usec exceeed hardware limiation");
+               pr_err("Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
        cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
        if (cycle > 0xFFFF) {
-               pr_err("Rx coalesed usec exceeed hardware limiation");
+               pr_err("Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
index 7615e0668acbf9b223b2087ed89646146dc6e827..2e6785b6e8bee1d971d739e160abe1ce17da9ca0 100644 (file)
@@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                                 tx_queue->tx_ring_size);
 
        if (likely(!nr_frags)) {
-               lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+               if (likely(!do_tstamp))
+                       lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
        } else {
                u32 lstatus_start = lstatus;
 
index c984462fad2a26fa267faab7707181a19c1d1814..4763252bbf8557ac98c73876e685d4b7413e3f43 100644 (file)
@@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
 static void mtk_phy_link_adjust(struct net_device *dev)
 {
        struct mtk_mac *mac = netdev_priv(dev);
+       u16 lcl_adv = 0, rmt_adv = 0;
+       u8 flowctrl;
        u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
                  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
                  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
        if (mac->phy_dev->link)
                mcr |= MAC_MCR_FORCE_LINK;
 
-       if (mac->phy_dev->duplex)
+       if (mac->phy_dev->duplex) {
                mcr |= MAC_MCR_FORCE_DPX;
 
-       if (mac->phy_dev->pause)
-               mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+               if (mac->phy_dev->pause)
+                       rmt_adv = LPA_PAUSE_CAP;
+               if (mac->phy_dev->asym_pause)
+                       rmt_adv |= LPA_PAUSE_ASYM;
+
+               if (mac->phy_dev->advertising & ADVERTISED_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_CAP;
+               if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+               flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+               if (flowctrl & FLOW_CTRL_TX)
+                       mcr |= MAC_MCR_FORCE_TX_FC;
+               if (flowctrl & FLOW_CTRL_RX)
+                       mcr |= MAC_MCR_FORCE_RX_FC;
+
+               netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+                         flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+                         flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+       }
 
        mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
 
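mii_resolve_flowctrl_fdx(), used above, implements the standard 802.3 pause-resolution table: symmetric pause when both link partners advertise PAUSE, asymmetric pause only when the PAUSE/ASYM bits line up in opposite directions. A standalone restatement of that table (the constants are stand-ins for the mii.h definitions):

	#include <stdint.h>
	#include <stdio.h>

	#define ADV_PAUSE_CAP  0x0400   /* stand-in for ADVERTISE_PAUSE_CAP  */
	#define ADV_PAUSE_ASYM 0x0800   /* stand-in for ADVERTISE_PAUSE_ASYM */
	#define FLOW_CTRL_TX   0x01
	#define FLOW_CTRL_RX   0x02

	static uint8_t resolve_flowctrl_fdx(uint16_t lcladv, uint16_t rmtadv)
	{
		uint8_t cap = 0;

		if (lcladv & rmtadv & ADV_PAUSE_CAP) {
			cap = FLOW_CTRL_TX | FLOW_CTRL_RX;  /* symmetric pause */
		} else if (lcladv & rmtadv & ADV_PAUSE_ASYM) {
			if (lcladv & ADV_PAUSE_CAP)
				cap = FLOW_CTRL_RX; /* we pause, partner doesn't */
			else if (rmtadv & ADV_PAUSE_CAP)
				cap = FLOW_CTRL_TX; /* partner pauses, we don't  */
		}
		return cap;
	}

	int main(void)
	{
		printf("%d\n", resolve_flowctrl_fdx(ADV_PAUSE_CAP, ADV_PAUSE_CAP)); /* 3 */
		return 0;
	}
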
@@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        u32 val, ge_mode;
 
        np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+       if (!np && of_phy_is_fixed_link(mac->of_node))
+               if (!of_phy_register_fixed_link(mac->of_node))
+                       np = of_node_get(mac->of_node);
        if (!np)
                return -ENODEV;
 
        switch (of_get_phy_mode(np)) {
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII:
                ge_mode = 0;
                break;
@@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        mac->phy_dev->autoneg = AUTONEG_ENABLE;
        mac->phy_dev->speed = 0;
        mac->phy_dev->duplex = 0;
-       mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+       mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+                                  SUPPORTED_Asym_Pause;
        mac->phy_dev->advertising = mac->phy_dev->supported |
                                    ADVERTISED_Autoneg;
        phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        return 0;
 
 err_free_bus:
-       kfree(eth->mii_bus);
+       mdiobus_free(eth->mii_bus);
 
 err_put_node:
        of_node_put(mii_np);
@@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
 
        mdiobus_unregister(eth->mii_bus);
        of_node_put(eth->mii_bus->dev.of_node);
-       kfree(eth->mii_bus);
+       mdiobus_free(eth->mii_bus);
 }
 
 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
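
The kfree() to mdiobus_free() swaps matter because mdiobus_alloc() returns more than a heap object: the mii_bus embeds a struct device, and once the bus has been registered its memory is owned by the device refcount, so release has to go through mdiobus_free() (which ends in put_device()) rather than a raw kfree(). The pairing, sketched:

	bus = mdiobus_alloc();      /* mii_bus + embedded struct device */
	if (!bus)
		return -ENOMEM;
	/* ... mdiobus_register(bus) ... mdiobus_unregister(bus) ... */
	mdiobus_free(bus);          /* drops the device reference; kfree()
				     * here would bypass the refcounting  */
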
index fd4392999eeefcd9a2f77b05583b04d04a10d663..f5c8d5db25a8cebc367c752c4333bc839b72505a 100644 (file)
@@ -3192,10 +3192,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        flush_workqueue(priv->wq);
        if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
                netif_device_detach(netdev);
-               mutex_lock(&priv->state_lock);
-               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-                       mlx5e_close_locked(netdev);
-               mutex_unlock(&priv->state_lock);
+               mlx5e_close(netdev);
        } else {
                unregister_netdev(netdev);
        }
index 229ab16fb8d3a5c92afec2ece0a30e2de9a46236..b000ddc29553bb40e297f075c1a3f7606487a87e 100644 (file)
@@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        while ((sq->pc & wq->sz_m1) > sq->edge)
                mlx5e_send_nop(sq, false);
 
-       sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+       if (bf)
+               sq->bf_budget--;
 
        sq->stats.packets++;
        sq->stats.bytes += num_bytes;
index b84a6918a7006253c503cb924c347f2123854781..aebbd6ccb9fe29fb90d46518f854252236d35934 100644 (file)
@@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                   0, &dest);
-       if (IS_ERR_OR_NULL(flow_rule)) {
+       if (IS_ERR(flow_rule)) {
                pr_warn(
                        "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
                         dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 
        table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
        fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
-       if (IS_ERR_OR_NULL(fdb)) {
+       if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create FDB Table err %d\n", err);
                goto out;
@@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
        eth_broadcast_addr(dmac);
        g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create flow group err(%d)\n", err);
                goto out;
@@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        eth_zero_addr(dmac);
        dmac[0] = 0x01;
        g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
                goto out;
@@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
        g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
                goto out;
@@ -529,7 +529,7 @@ out:
                }
        }
 
-       kfree(flow_group_in);
+       kvfree(flow_group_in);
        return err;
 }
 
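The IS_ERR_OR_NULL() to IS_ERR() conversions throughout this file track an API contract: the mlx5 flow-steering constructors (mlx5_create_flow_table/group/rule) return either a valid pointer or an ERR_PTR()-encoded errno, never NULL. Under the old check a hypothetical NULL would enter the error branch, where PTR_ERR(NULL) == 0 turns the failure into a bogus success. The kfree() to kvfree() changes are the matching release-side repair: flow_group_in comes from a vzalloc-style allocation, and only kvfree() handles both kmalloc'ed and vmalloc'ed buffers. The corrected shape:

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {            /* valid pointer or ERR_PTR, never NULL */
		err = PTR_ERR(g);   /* always a real negative errno here    */
		goto out;
	}
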
@@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
                                        esw_fdb_set_vport_rule(esw,
                                                               mac,
                                                               vport_idx);
+                       iter_vaddr->mc_promisc = true;
                        break;
                case MLX5_ACTION_DEL:
                        if (!iter_vaddr)
@@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
                return;
 
        acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-       if (IS_ERR_OR_NULL(acl)) {
+       if (IS_ERR(acl)) {
                err = PTR_ERR(acl);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
                         vport->vport, err);
@@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
        vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(vlan_grp)) {
+       if (IS_ERR(vlan_grp)) {
                err = PTR_ERR(vlan_grp);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
                         vport->vport, err);
@@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
        drop_grp = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(drop_grp)) {
+       if (IS_ERR(drop_grp)) {
                err = PTR_ERR(drop_grp);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
                         vport->vport, err);
@@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        vport->egress.drop_grp = drop_grp;
        vport->egress.allowed_vlans_grp = vlan_grp;
 out:
-       kfree(flow_group_in);
+       kvfree(flow_group_in);
        if (err && !IS_ERR_OR_NULL(vlan_grp))
                mlx5_destroy_flow_group(vlan_grp);
        if (err && !IS_ERR_OR_NULL(acl))
@@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
                return;
 
        acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-       if (IS_ERR_OR_NULL(acl)) {
+       if (IS_ERR(acl)) {
                err = PTR_ERR(acl);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
                         vport->vport, err);
@@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
                         vport->vport, err);
@@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
                         vport->vport, err);
@@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
                         vport->vport, err);
@@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
                         vport->vport, err);
@@ -1259,7 +1260,7 @@ out:
                        mlx5_destroy_flow_table(vport->ingress.acl);
        }
 
-       kfree(flow_group_in);
+       kvfree(flow_group_in);
 }
 
 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+       if (IS_ERR(vport->ingress.allow_rule)) {
                err = PTR_ERR(vport->ingress.allow_rule);
                pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
                        vport->vport, err);
@@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_DROP,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+       if (IS_ERR(vport->ingress.drop_rule)) {
                err = PTR_ERR(vport->ingress.drop_rule);
                pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
                        vport->vport, err);
@@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+       if (IS_ERR(vport->egress.allowed_vlan)) {
                err = PTR_ERR(vport->egress.allowed_vlan);
                pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
                        vport->vport, err);
@@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_DROP,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+       if (IS_ERR(vport->egress.drop_rule)) {
                err = PTR_ERR(vport->egress.drop_rule);
                pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
                        vport->vport, err);
@@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
        /* Sync with current vport context */
        vport->enabled_events = enable_events;
-       esw_vport_change_handle_locked(vport);
-
        vport->enabled = true;
 
        /* only PF is trusted by default */
        vport->trusted = (vport_num) ? false : true;
-
-       arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+       esw_vport_change_handle_locked(vport);
 
        esw->enabled_vports++;
        esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
@@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
        (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
 
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+       ((u8 *)node_guid)[7] = mac[0];
+       ((u8 *)node_guid)[6] = mac[1];
+       ((u8 *)node_guid)[5] = mac[2];
+       ((u8 *)node_guid)[4] = 0xff;
+       ((u8 *)node_guid)[3] = 0xfe;
+       ((u8 *)node_guid)[2] = mac[3];
+       ((u8 *)node_guid)[1] = mac[4];
+       ((u8 *)node_guid)[0] = mac[5];
+}
+
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               int vport, u8 mac[ETH_ALEN])
 {
-       int err = 0;
        struct mlx5_vport *evport;
+       u64 node_guid;
+       int err = 0;
 
        if (!ESW_ALLOWED(esw))
                return -EPERM;
@@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                return err;
        }
 
+       node_guid_gen_from_mac(&node_guid, mac);
+       err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+       if (err)
+               mlx5_core_warn(esw->dev,
+                              "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+                              vport, err);
+
        mutex_lock(&esw->state_lock);
        if (evport->enabled)
                err = esw_vport_ingress_config(esw, evport);
        mutex_unlock(&esw->state_lock);
-
        return err;
 }
 
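node_guid_gen_from_mac() builds the GUID the same way EUI-64 expands a 48-bit MAC: OUI, then the ff:fe filler, then the NIC-specific bytes. The indices run backwards because the bytes are written into a host-order u64, so on a little-endian machine index 7 is the most significant byte. A standalone check of that placement:

	#include <stdint.h>
	#include <stdio.h>

	/* Same byte placement as the hunk above. */
	static void node_guid_from_mac(uint64_t *node_guid, const uint8_t mac[6])
	{
		uint8_t *g = (uint8_t *)node_guid;

		g[7] = mac[0]; g[6] = mac[1]; g[5] = mac[2];
		g[4] = 0xff;   g[3] = 0xfe;
		g[2] = mac[3]; g[1] = mac[4]; g[0] = mac[5];
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint64_t guid = 0;

		node_guid_from_mac(&guid, mac);
		/* On a little-endian host: 0x001122fffe334455 */
		printf("node guid: 0x%016llx\n", (unsigned long long)guid);
		return 0;
	}
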
index 8b5f0b2c0d5cd5d8f62cf6560ee9fa498800ac03..e912a3d2505ed39bee13717224e1c41111a29cda 100644 (file)
@@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
                                       ft->id);
                        return err;
                }
-               root->root_ft = new_root_ft;
        }
+       root->root_ft = new_root_ft;
        return 0;
 }
 
@@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
 
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return;
+
        cleanup_root_ns(dev);
        cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
        cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
@@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
        int err = 0;
 
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return 0;
+
        err = mlx5_init_fc_stats(dev);
        if (err)
                return err;
 
-       if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+       if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+           MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
                err = init_root_ns(dev);
                if (err)
                        goto err;
        }
+
        if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
-               err = init_fdb_root_ns(dev);
-               if (err)
-                       goto err;
-       }
-       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-               err = init_egress_acl_root_ns(dev);
-               if (err)
-                       goto err;
-       }
-       if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-               err = init_ingress_acl_root_ns(dev);
-               if (err)
-                       goto err;
+               if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+                       err = init_fdb_root_ns(dev);
+                       if (err)
+                               goto err;
+               }
+               if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+                       err = init_egress_acl_root_ns(dev);
+                       if (err)
+                               goto err;
+               }
+               if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+                       err = init_ingress_acl_root_ns(dev);
+                       if (err)
+                               goto err;
+               }
        }
 
        return 0;
index b720a274220d95793f5803620d18db8fcae2073b..b82d65802d964a2d03d9e2a99c2719c7642d72d3 100644 (file)
@@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);
        else
-               *xrcdn = be32_to_cpu(out.xrcdn);
+               *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
 
        return err;
 }
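
XRC domain numbers are 24-bit objects; the firmware reply carries them in a big-endian 32-bit field whose top byte holds unrelated bits, so the conversion must mask after the byte swap, which is exactly what the one-line fix does. In miniature (POSIX ntohl() standing in for be32_to_cpu(), and the reply value invented for illustration):

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint32_t wire = htonl(0xab000123);   /* 0xab = junk, 0x000123 = xrcdn */
		uint32_t xrcdn = ntohl(wire) & 0xffffff;

		printf("xrcdn = 0x%06x\n", xrcdn);   /* 0x000123 */
		return 0;
	}
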
index b69dadcfb897a25a51b89a9caed4d1454d603d8e..daf44cd4c566f45649b9efe1e4417b4e868c9048 100644 (file)
@@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
 
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+                                   u32 vport, u64 node_guid)
+{
+       int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+       void *nic_vport_context;
+       u8 *guid;
+       void *in;
+       int err;
+
+       if (!vport)
+               return -EINVAL;
+       if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+               return -EACCES;
+       if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+               return -ENOTSUPP;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(modify_nic_vport_context_in, in,
+                field_select.node_guid, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+       MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+       nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+                                        in, nic_vport_context);
+       guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
+                           node_guid);
+       MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+       err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
+
 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
                                        u16 *qkey_viol_cntr)
 {
index 4a7273771028db08cc4e1bd9e76df1d05fd73785..6f9e3ddff4a8df98dd95818db7e722aaf8cb76a9 100644 (file)
@@ -247,15 +247,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 }
 
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                                   u8 swid)
 {
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pspa_pl[MLXSW_REG_PSPA_LEN];
 
-       mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+       mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
 }
 
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+       return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+                                       swid);
+}
+
 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     bool enable)
 {
@@ -305,9 +313,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 }
 
-static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-                                          u8 local_port, u8 *p_module,
-                                          u8 *p_width, u8 *p_lane)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+                                        u8 local_port, u8 *p_module,
+                                        u8 *p_width, u8 *p_lane)
 {
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int err;
@@ -322,16 +330,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
        return 0;
 }
 
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-                                        u8 local_port, u8 *p_module,
-                                        u8 *p_width)
-{
-       u8 lane;
-
-       return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
-                                              p_width, &lane);
-}
-
 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u8 module, u8 width, u8 lane)
 {
@@ -949,17 +947,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
                                            size_t len)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       u8 module, width, lane;
+       u8 module = mlxsw_sp_port->mapping.module;
+       u8 width = mlxsw_sp_port->mapping.width;
+       u8 lane = mlxsw_sp_port->mapping.lane;
        int err;
 
-       err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
-                                             mlxsw_sp_port->local_port,
-                                             &module, &width, &lane);
-       if (err) {
-               netdev_err(dev, "Failed to retrieve module information\n");
-               return err;
-       }
-
        if (!mlxsw_sp_port->split)
                err = snprintf(name, len, "p%d", module + 1);
        else
@@ -1681,8 +1673,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
        return 0;
 }
 
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                                 bool split, u8 module, u8 width)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                               bool split, u8 module, u8 width, u8 lane)
 {
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct net_device *dev;
@@ -1697,6 +1689,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
        mlxsw_sp_port->local_port = local_port;
        mlxsw_sp_port->split = split;
+       mlxsw_sp_port->mapping.module = module;
+       mlxsw_sp_port->mapping.width = width;
+       mlxsw_sp_port->mapping.lane = lane;
        bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
        mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
        if (!mlxsw_sp_port->active_vlans) {
@@ -1839,28 +1834,6 @@ err_port_active_vlans_alloc:
        return err;
 }
 
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                               bool split, u8 module, u8 width, u8 lane)
-{
-       int err;
-
-       err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
-                                      lane);
-       if (err)
-               return err;
-
-       err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
-                                    width);
-       if (err)
-               goto err_port_create;
-
-       return 0;
-
-err_port_create:
-       mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
-       return err;
-}
-
 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
 {
        struct net_device *dev = mlxsw_sp_port->dev;
@@ -1909,8 +1882,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
 
 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 {
+       u8 module, width, lane;
        size_t alloc_size;
-       u8 module, width;
        int i;
        int err;
 
@@ -1921,13 +1894,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 
        for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
-                                                   &width);
+                                                   &width, &lane);
                if (err)
                        goto err_port_module_info_get;
                if (!width)
                        continue;
                mlxsw_sp->port_to_module[i] = module;
-               err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+               err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+                                          lane);
                if (err)
                        goto err_port_create;
        }
@@ -1948,12 +1922,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
        return local_port - offset;
 }
 
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+                                     u8 module, unsigned int count)
+{
+       u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+       int err, i;
+
+       for (i = 0; i < count; i++) {
+               err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+                                              width, i * width);
+               if (err)
+                       goto err_port_module_map;
+       }
+
+       for (i = 0; i < count; i++) {
+               err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+               if (err)
+                       goto err_port_swid_set;
+       }
+
+       for (i = 0; i < count; i++) {
+               err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+                                          module, width, i * width);
+               if (err)
+                       goto err_port_create;
+       }
+
+       return 0;
+
+err_port_create:
+       for (i--; i >= 0; i--)
+               mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+       i = count;
+err_port_swid_set:
+       for (i--; i >= 0; i--)
+               __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+                                        MLXSW_PORT_SWID_DISABLED_PORT);
+       i = count;
+err_port_module_map:
+       for (i--; i >= 0; i--)
+               mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+       return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+                                        u8 base_port, unsigned int count)
+{
+       u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+       int i;
+
+       /* Split by four means we need to re-create two ports, otherwise
+        * only one.
+        */
+       count = count / 2;
+
+       for (i = 0; i < count; i++) {
+               local_port = base_port + i * 2;
+               module = mlxsw_sp->port_to_module[local_port];
+
+               mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+                                        0);
+       }
+
+       for (i = 0; i < count; i++)
+               __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+       for (i = 0; i < count; i++) {
+               local_port = base_port + i * 2;
+               module = mlxsw_sp->port_to_module[local_port];
+
+               mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+                                    width, 0);
+       }
+}
+
 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                               unsigned int count)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
-       u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
        u8 module, cur_width, base_port;
        int i;
        int err;
@@ -1965,18 +2012,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                return -EINVAL;
        }
 
+       module = mlxsw_sp_port->mapping.module;
+       cur_width = mlxsw_sp_port->mapping.width;
+
        if (count != 2 && count != 4) {
                netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
                return -EINVAL;
        }
 
-       err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
-                                           &cur_width);
-       if (err) {
-               netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
-               return err;
-       }
-
        if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
                netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
                return -EINVAL;
@@ -2001,25 +2044,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
        for (i = 0; i < count; i++)
                mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 
-       for (i = 0; i < count; i++) {
-               err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
-                                          module, width, i * width);
-               if (err) {
-                       dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
-                       goto err_port_create;
-               }
+       err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+               goto err_port_split_create;
        }
 
        return 0;
 
-err_port_create:
-       for (i--; i >= 0; i--)
-               mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
-       for (i = 0; i < count / 2; i++) {
-               module = mlxsw_sp->port_to_module[base_port + i * 2];
-               mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
-                                    module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
-       }
+err_port_split_create:
+       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
        return err;
 }
 
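mlxsw_sp_port_split_create() above runs three passes (module map, swid set, port create) so that a failure in any pass can rewind not just the partially-completed pass but every earlier pass in full; the "i = count;" resets before each fallthrough label re-arm the loop to cover all entries. A runnable two-pass toy with the same unwind shape (all names invented):

	#include <stdio.h>

	static int stage(int pass, int i)
	{
		printf("do   pass%d[%d]\n", pass, i);
		return (pass == 2 && i == 1) ? -1 : 0;  /* inject a failure */
	}

	static void undo(int pass, int i)
	{
		printf("undo pass%d[%d]\n", pass, i);
	}

	int main(void)
	{
		const int count = 3;
		int i;

		for (i = 0; i < count; i++)
			if (stage(1, i) < 0)
				goto err_stage1;
		for (i = 0; i < count; i++)
			if (stage(2, i) < 0)
				goto err_stage2;
		return 0;

	err_stage2:
		for (i--; i >= 0; i--)  /* rewind the partial second pass */
			undo(2, i);
		i = count;              /* first pass completed in full */
	err_stage1:
		for (i--; i >= 0; i--)
			undo(1, i);
		return 1;
	}
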
@@ -2027,10 +2061,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
-       u8 module, cur_width, base_port;
+       u8 cur_width, base_port;
        unsigned int count;
        int i;
-       int err;
 
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
@@ -2044,12 +2077,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
                return -EINVAL;
        }
 
-       err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
-                                           &cur_width);
-       if (err) {
-               netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
-               return err;
-       }
+       cur_width = mlxsw_sp_port->mapping.width;
        count = cur_width == 1 ? 4 : 2;
 
        base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -2061,14 +2089,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
        for (i = 0; i < count; i++)
                mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 
-       for (i = 0; i < count / 2; i++) {
-               module = mlxsw_sp->port_to_module[base_port + i * 2];
-               err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
-                                          module, MLXSW_PORT_MODULE_MAX_WIDTH,
-                                          0);
-               if (err)
-                       dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
-       }
+       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
 
        return 0;
 }
index e2c022d3e2f3bf19df2084049920f40b7eac4c26..13b30eaa13d471180d473459aa8fe38606a0b8f1 100644 (file)
@@ -229,6 +229,11 @@ struct mlxsw_sp_port {
                struct ieee_maxrate *maxrate;
                struct ieee_pfc *pfc;
        } dcb;
+       struct {
+               u8 module;
+               u8 width;
+               u8 lane;
+       } mapping;
        /* 802.1Q bridge VLANs */
        unsigned long *active_vlans;
        unsigned long *untagged_vlans;
index 753064679bdeba5d38fa903f0350480b96c71978..61cc6869fa6508f549d59bc9a525e5e3901d73a7 100644 (file)
@@ -1105,6 +1105,39 @@ static int qed_get_port_type(u32 media_type)
        return port_type;
 }
 
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+                            struct qed_mcp_link_params *params,
+                            struct qed_mcp_link_state *link,
+                            struct qed_mcp_link_capabilities *link_caps)
+{
+       void *p;
+
+       if (!IS_PF(hwfn->cdev)) {
+               qed_vf_get_link_params(hwfn, params);
+               qed_vf_get_link_state(hwfn, link);
+               qed_vf_get_link_caps(hwfn, link_caps);
+
+               return 0;
+       }
+
+       p = qed_mcp_get_link_params(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(params, p, sizeof(*params));
+
+       p = qed_mcp_get_link_state(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(link, p, sizeof(*link));
+
+       p = qed_mcp_get_link_capabilities(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(link_caps, p, sizeof(*link_caps));
+
+       return 0;
+}
+
 static void qed_fill_link(struct qed_hwfn *hwfn,
                          struct qed_link_output *if_link)
 {
@@ -1116,15 +1149,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        memset(if_link, 0, sizeof(*if_link));
 
        /* Prepare source inputs */
-       if (IS_PF(hwfn->cdev)) {
-               memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-               memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-               memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
-                      sizeof(link_caps));
-       } else {
-               qed_vf_get_link_params(hwfn, &params);
-               qed_vf_get_link_state(hwfn, &link);
-               qed_vf_get_link_caps(hwfn, &link_caps);
+       if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+               dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+               return;
        }
 
        /* Set the link parameters to pass to protocol driver */
index c8667c65e685b3be8709a0daa188a1e07d389e22..c90b2b6ad96937feb7f04ef1a8123a73bca061d4 100644 (file)
 #include "qed_vf.h"
 #define QED_VF_ARRAY_LENGTH (3)
 
+#ifdef CONFIG_QED_SRIOV
 #define IS_VF(cdev)             ((cdev)->b_is_vf)
 #define IS_PF(cdev)             (!((cdev)->b_is_vf))
-#ifdef CONFIG_QED_SRIOV
 #define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
 #else
+#define IS_VF(cdev)             (0)
+#define IS_PF(cdev)             (1)
 #define IS_PF_SRIOV(p_hwfn)     (0)
 #endif
 #define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))
index 5d00d1404bfc1b3e0e6ef76af0f392dff28fbe2b..5733d188822348c45c5a4f23cd245a0e3cfc8188 100644 (file)
@@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = {
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
        { 0 }
 };
 
index 7f295c4d7b80cb866c3f2f10f71aeeaefaf3caa5..2a9228a6e4a08cea22ba5bed9e1eae24cc25f591 100644 (file)
@@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
 
        case MC_CMD_MEDIA_XFP:
        case MC_CMD_MEDIA_SFP_PLUS:
-               result |= SUPPORTED_FIBRE;
-               break;
-
        case MC_CMD_MEDIA_QSFP_PLUS:
                result |= SUPPORTED_FIBRE;
+               if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+                       result |= SUPPORTED_1000baseT_Full;
+               if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+                       result |= SUPPORTED_10000baseT_Full;
                if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
                        result |= SUPPORTED_40000baseCR4_Full;
                break;
index 4f7283d05588b53be530270b481cc100d05eb63a..44da877d2483806daf07c2c57760774141d635bf 100644 (file)
@@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
                struct netdev_hw_addr *ha;
 
                netdev_for_each_uc_addr(ha, dev) {
-                       dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
+                       dwmac4_set_umac_addr(hw, ha->addr, reg);
                        reg++;
                }
        }
index eac45d0c75e2c0cc3855f69767737bc9fb7c440f..a473c182c91d0e4f981640ee285486e4a48e95d8 100644 (file)
@@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev)
        if (!netif_running(ndev))
                return 0;
 
-       spin_lock_irqsave(&priv->lock, flags);
-
        /* Power Down bit, into the PM register, is cleared
         * automatically as soon as a magic packet or a Wake-up frame
         * is received. Anyway, it's better to manually clear
@@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev)
         * from other devices (e.g. a serial console).
         */
        if (device_may_wakeup(priv->device)) {
+               spin_lock_irqsave(&priv->lock, flags);
                priv->hw->mac->pmt(priv->hw, 0);
+               spin_unlock_irqrestore(&priv->lock, flags);
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
@@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev)
 
        netif_device_attach(ndev);
 
+       spin_lock_irqsave(&priv->lock, flags);
+
        priv->cur_rx = 0;
        priv->dirty_rx = 0;
        priv->dirty_tx = 0;
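The net effect of the three stmmac hunks is to narrow the lock so that spin_lock_irqsave() no longer spans pinctrl_pm_select_default_state() and the surrounding resume work, which may sleep; sleeping while holding a spinlock with interrupts disabled is invalid. A schematic of the pattern, with hypothetical helper names and a hypothetical demo_priv type:

/*
 * Pattern sketch (hypothetical names): hold the lock only around
 * register-level work; anything that may sleep stays outside it.
 */
static void demo_resume(struct demo_priv *priv)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        hw_clear_pmt(priv);             /* atomic-safe register write */
        spin_unlock_irqrestore(&priv->lock, flags);

        select_default_pinctrl(priv);   /* may sleep - no lock held */

        spin_lock_irqsave(&priv->lock, flags);
        reset_rings_and_restart_dma(priv);      /* atomic-safe again */
        spin_unlock_irqrestore(&priv->lock, flags);
}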
index 4b08a2f52b3e6074218fb6ba876a639855818d3d..e6bb0ecb12c74cb88204635d3ec2712816597a44 100644
@@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
        if (priv->coal_intvl != 0) {
                struct ethtool_coalesce coal;
 
-               coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+               coal.rx_coalesce_usecs = priv->coal_intvl;
                cpsw_set_coalesce(ndev, &coal);
        }
 
index db8022ae415bd234c19c8348dc6f90697d6ad840..08885bc8d6db6a11a775b30d3a39b4e9bc826f8e 100644
@@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
 
                                segCnt = rcdlro->segCnt;
-                               BUG_ON(segCnt <= 1);
+                               WARN_ON_ONCE(segCnt == 0);
                                mss = rcdlro->mss;
                                if (unlikely(segCnt <= 1))
                                        segCnt = 0;
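The vmxnet3 change trades a fatal BUG_ON() for a one-shot warning plus a clamp, since a bad segment count from the device is recoverable. A generic sketch of the pattern (demo_sanitize_seg_count is hypothetical):

/*
 * Pattern sketch: warn once and clamp instead of panicking when a
 * device-supplied value is out of range but processing can continue.
 */
static u32 demo_sanitize_seg_count(u32 seg_count)
{
        WARN_ON_ONCE(seg_count == 0);   /* log once, with a backtrace */
        if (unlikely(seg_count <= 1))
                seg_count = 0;          /* fall back to the non-LRO path */
        return seg_count;
}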
index c4825392d64b6c8de17822532d113689f88d8bb7..3d2b64e63408d69ae86fb15b2b18368ab01b5025 100644
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.8.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040800
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index d0631b6cfd5302ee7d73c25937f29f8184e0be83..62f475e31077ca1fa1df15863f02ee0911c46997 100644
@@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                           const u8 *mac, struct station_info *sinfo)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_scb_val_le scb_val;
        s32 err = 0;
        struct brcmf_sta_info_le sta_info_le;
        u32 sta_flags;
        u32 is_tdls_peer;
        s32 total_rssi;
        s32 count_rssi;
+       int rssi;
        u32 i;
 
        brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
@@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                        sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
                        total_rssi /= count_rssi;
                        sinfo->signal = total_rssi;
+               } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+                       &ifp->vif->sme_state)) {
+                       memset(&scb_val, 0, sizeof(scb_val));
+                       err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+                                                    &scb_val, sizeof(scb_val));
+                       if (err) {
+                               brcmf_err("Could not get rssi (%d)\n", err);
+                               goto done;
+                       } else {
+                               rssi = le32_to_cpu(scb_val.val);
+                               sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+                               sinfo->signal = rssi;
+                               brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+                       }
                }
        }
 done:
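When no per-antenna RSSI samples are reported, the brcmfmac hunk falls back to the firmware's aggregate BRCMF_C_GET_RSSI query for a connected interface. Note the __le32 handling: values shared with the firmware keep wire byte order in the struct and are converted at the driver boundary. A hedged sketch of that convention (demo names are hypothetical):

/*
 * Convention sketch: firmware-shared structs carry little-endian
 * fields; le32_to_cpu() converts to host order exactly once, at the
 * point where the value enters driver logic.
 */
struct demo_fw_val {                    /* hypothetical struct */
        __le32 val;                     /* as the firmware wrote it */
};

static s32 demo_read_rssi(const struct demo_fw_val *v)
{
        return (s32)le32_to_cpu(v->val);        /* host byte order */
}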
index 68f1ce02f4bf83d8d787e256cb5d74628cc4040e..2b9a2bc429d6fc23a49aecad27e36e14ac5408ce 100644
@@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
                brcmu_pkt_buf_free_skb(skb);
                return;
        }
+
+       skb->protocol = eth_type_trans(skb, ifp->ndev);
        brcmf_netif_rx(ifp, skb);
 }
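The one-line msgbuf fix sets skb->protocol before the frame is handed to the stack; without it, the stack sees protocol 0. eth_type_trans() also sets skb->pkt_type and pulls the Ethernet header. A minimal receive-path sketch (demo_rx is a hypothetical driver function):

/*
 * Minimal rx sketch: eth_type_trans() parses the Ethernet header,
 * fills skb->protocol and skb->pkt_type, and advances skb->data past
 * the header before the skb is passed up.
 */
static void demo_rx(struct net_device *ndev, struct sk_buff *skb)
{
        skb->protocol = eth_type_trans(skb, ndev);
        netif_rx(skb);
}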
 
index 9ed0ed1bf51481e4458dbeae2d105731bbf0f575..4dd5adcdd29bafae3f1432b87758ea2468189dbe 100644
@@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
        if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
            !info->attrs[HWSIM_ATTR_FLAGS] ||
            !info->attrs[HWSIM_ATTR_COOKIE] ||
+           !info->attrs[HWSIM_ATTR_SIGNAL] ||
            !info->attrs[HWSIM_ATTR_TX_INFO])
                goto out;
 
index 0f48048b865407fe8d984b1057bd262ccd28dc7f..3a0faa8fe9d46deef63514afeba5dbbb9ecd7d14 100644
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
 void rtl_addr_delay(u32 addr)
 {
        if (addr == 0xfe)
-               msleep(50);
+               mdelay(50);
        else if (addr == 0xfd)
                msleep(5);
        else if (addr == 0xfc)
@@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
                rtl_addr_delay(addr);
        } else {
                rtl_set_rfreg(hw, rfpath, addr, mask, data);
-               usleep_range(1, 2);
+               udelay(1);
        }
 }
 EXPORT_SYMBOL(rtl_rfreg_delay);
@@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
                rtl_addr_delay(addr);
        } else {
                rtl_set_bbreg(hw, addr, MASKDWORD, data);
-               usleep_range(1, 2);
+               udelay(1);
        }
 }
 EXPORT_SYMBOL(rtl_bb_delay);
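These rtlwifi substitutions are about context, not timing precision: msleep() and usleep_range() may sleep and are only legal in process context, while mdelay() and udelay() busy-wait and are safe under spinlocks or in interrupt context, where these register-write paths can run. Rule-of-thumb sketch (see Documentation/timers/timers-howto.txt; atomic_ctx is a hypothetical flag for illustration):

static void demo_delay_50ms(bool atomic_ctx)
{
        if (atomic_ctx)
                mdelay(50);     /* busy-waits; safe under spinlocks */
        else
                msleep(50);     /* sleeps; process context only */
}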
index e158b22ef32f16d2fcf61c29fbf6d15a65581c1e..a7a28110dc80aa9df1e700d9c24aa297161ab996 100644
@@ -2275,7 +2275,7 @@ static int elf_core_dump(struct coredump_params *cprm)
                goto end_coredump;
 
        /* Align to page */
-       if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+       if (!dump_skip(cprm, dataoff - cprm->pos))
                goto end_coredump;
 
        for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
index 71ade0e556b7e7cab5b83705228a54099b067749..203589311bf88733bdb53d329a3b05a490ce9164 100644
@@ -1787,7 +1787,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
                                goto end_coredump;
        }
 
-       if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+       if (!dump_skip(cprm, dataoff - cprm->pos))
                goto end_coredump;
 
        if (!elf_fdpic_dump_segments(cprm))
index 427c36b430a6e52dfadbcd9d9b620b5368d0b2ff..46025688f1d0363e2141a8a866043cec5e82026e 100644
@@ -1373,7 +1373,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 
        if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
                BUG_ON(tm->slot != 0);
-               eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
+               eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
+                                               eb->len);
                if (!eb_rewin) {
                        btrfs_tree_read_unlock_blocking(eb);
                        free_extent_buffer(eb);
@@ -1454,7 +1455,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        } else if (old_root) {
                btrfs_tree_read_unlock(eb_root);
                free_extent_buffer(eb_root);
-               eb = alloc_dummy_extent_buffer(root->fs_info, logical);
+               eb = alloc_dummy_extent_buffer(root->fs_info, logical,
+                                       root->nodesize);
        } else {
                btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
                eb = btrfs_clone_extent_buffer(eb_root);
index 6628fca9f4ed87c047e54007f1f94f99ebbaba65..1142127f6e5e287e8fb4d261510de0cdb2802137 100644
@@ -1147,7 +1147,8 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr)
 {
        if (btrfs_test_is_dummy_root(root))
-               return alloc_test_extent_buffer(root->fs_info, bytenr);
+               return alloc_test_extent_buffer(root->fs_info, bytenr,
+                               root->nodesize);
        return alloc_extent_buffer(root->fs_info, bytenr);
 }
 
@@ -1314,14 +1315,16 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 /* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(void)
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_root *root;
 
        root = btrfs_alloc_root(NULL, GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);
-       __setup_root(4096, 4096, 4096, root, NULL, 1);
+       /* We don't use the stripesize in selftest, set it as sectorsize */
+       __setup_root(nodesize, sectorsize, sectorsize, root, NULL,
+                       BTRFS_ROOT_TREE_OBJECTID);
        set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
        root->alloc_bytenr = 0;
 
@@ -4130,6 +4133,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
         * Hint to catch really bogus numbers or bit flips; more exact checks are
         * done later
         */
+       if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
+               btrfs_err(fs_info, "bytes_used is too small %llu",
+                      btrfs_super_bytes_used(sb));
+               ret = -EINVAL;
+       }
+       if (!is_power_of_2(btrfs_super_stripesize(sb)) ||
+           btrfs_super_stripesize(sb) != sectorsize) {
+               btrfs_err(fs_info, "invalid stripesize %u",
+                      btrfs_super_stripesize(sb));
+               ret = -EINVAL;
+       }
        if (btrfs_super_num_devices(sb) > (1UL << 31))
                printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
                                btrfs_super_num_devices(sb));
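The two added checks reject superblocks whose bytes_used or stripesize cannot possibly be valid; the 6 * nodesize floor presumably reflects the minimum set of tree roots any btrfs filesystem carries. A standalone restatement of the logic, with assumed parameter names:

/*
 * Standalone restatement (parameter names are assumptions): values
 * below these bounds are certainly bogus, whatever the filesystem.
 */
static int demo_check_super(u64 bytes_used, u32 nodesize,
                            u32 stripesize, u32 sectorsize)
{
        if (bytes_used < 6ULL * nodesize)
                return -EINVAL;         /* too small for any tree roots */
        if (!is_power_of_2(stripesize) || stripesize != sectorsize)
                return -EINVAL;         /* stripesize must match sectorsize */
        return 0;
}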
index 8e79d0070bcf57d26f6792e7775fde0117834810..acba821499a909e3a8d42e62e34c8f8e3f27bfa2 100644
@@ -90,7 +90,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
 void btrfs_free_fs_root(struct btrfs_root *root);
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(void);
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize);
 #endif
 
 /*
index 6e953de83f08e35d363eaf08466e3ef89449ff64..a3412d68ad37644551a3daf407f673470c1450c3 100644
@@ -4728,16 +4728,16 @@ err:
 }
 
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-                                               u64 start)
+                                               u64 start, u32 nodesize)
 {
        unsigned long len;
 
        if (!fs_info) {
                /*
                 * Called only from tests that don't always have a fs_info
-                * available, but we know that nodesize is 4096
+                * available
                 */
-               len = 4096;
+               len = nodesize;
        } else {
                len = fs_info->tree_root->nodesize;
        }
@@ -4833,7 +4833,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start)
+                                       u64 start, u32 nodesize)
 {
        struct extent_buffer *eb, *exists = NULL;
        int ret;
@@ -4841,7 +4841,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
        eb = find_extent_buffer(fs_info, start);
        if (eb)
                return eb;
-       eb = alloc_dummy_extent_buffer(fs_info, start);
+       eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
        if (!eb)
                return NULL;
        eb->fs_info = fs_info;
index 1baf19c9b79d2f48d407549b7dcdf03ce0faed3c..c0c1c4fef6cea0a6ab542a38596093b186b5d7d1 100644
@@ -348,7 +348,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
                                                  u64 start, unsigned long len);
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-                                               u64 start);
+                                               u64 start, u32 nodesize);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
                                         u64 start);
@@ -468,5 +468,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
                                      u64 *end, u64 max_bytes);
 #endif
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start);
+                                              u64 start, u32 nodesize);
 #endif
index c6dc1183f54249c3ee5681fcec818fc9194bc52b..69d270f6602c2de22a734af3f848ad03bf767906 100644
@@ -29,7 +29,7 @@
 #include "inode-map.h"
 #include "volumes.h"
 
-#define BITS_PER_BITMAP                (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
 #define MAX_CACHE_BYTES_PER_GIG        SZ_32K
 
 struct btrfs_trim_range {
@@ -1415,11 +1415,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
 {
        u64 bitmap_start;
-       u32 bytes_per_bitmap;
+       u64 bytes_per_bitmap;
 
        bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
        bitmap_start = offset - ctl->start;
-       bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
+       bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
        bitmap_start *= bytes_per_bitmap;
        bitmap_start += ctl->start;
 
@@ -1638,10 +1638,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
        u64 bitmap_bytes;
        u64 extent_bytes;
        u64 size = block_group->key.offset;
-       u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
-       u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);
+       u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+       u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
-       max_bitmaps = max_t(u32, max_bitmaps, 1);
+       max_bitmaps = max_t(u64, max_bitmaps, 1);
 
        ASSERT(ctl->total_bitmaps <= max_bitmaps);
 
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
         * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
         * we add more bitmaps.
         */
-       bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
+       bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;
 
        if (bitmap_bytes >= max_bytes) {
                ctl->extents_thresh = 0;
@@ -3662,7 +3662,7 @@ have_info:
                        if (tmp->offset + tmp->bytes < offset)
                                break;
                        if (offset + bytes < tmp->offset) {
-                               n = rb_prev(&info->offset_index);
+                               n = rb_prev(&tmp->offset_index);
                                continue;
                        }
                        info = tmp;
@@ -3676,7 +3676,7 @@ have_info:
                        if (offset + bytes < tmp->offset)
                                break;
                        if (tmp->offset + tmp->bytes < offset) {
-                               n = rb_next(&info->offset_index);
+                               n = rb_next(&tmp->offset_index);
                                continue;
                        }
                        info = tmp;
index aae520b2aee535f069946ba332b83a3ab0d44644..a97fdc156a03512bf36df40dd1b4278df845115f 100644
@@ -24,6 +24,11 @@ int __init btrfs_hash_init(void)
        return PTR_ERR_OR_ZERO(tfm);
 }
 
+const char* btrfs_crc32c_impl(void)
+{
+       return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+}
+
 void btrfs_hash_exit(void)
 {
        crypto_free_shash(tfm);
index 118a2316e5d39f51cc6293e912ed9d2598d6afd8..c3a2ec554361f5d33b4b5a1a6eda29dca5b82aad 100644
@@ -22,6 +22,7 @@
 int __init btrfs_hash_init(void);
 
 void btrfs_hash_exit(void);
+const char* btrfs_crc32c_impl(void);
 
 u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length);
 
index 4e59a91a11e0447ab05f5c39b9f5baa58121fc23..4339b6613f190da6feaf83b2e2c2e07ea0aca062 100644
@@ -2303,7 +2303,7 @@ static void btrfs_interface_exit(void)
 
 static void btrfs_print_mod_info(void)
 {
-       printk(KERN_INFO "Btrfs loaded"
+       printk(KERN_INFO "Btrfs loaded, crc32c=%s"
 #ifdef CONFIG_BTRFS_DEBUG
                        ", debug=on"
 #endif
@@ -2313,33 +2313,48 @@ static void btrfs_print_mod_info(void)
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
                        ", integrity-checker=on"
 #endif
-                       "\n");
+                       "\n",
+                       btrfs_crc32c_impl());
 }
 
 static int btrfs_run_sanity_tests(void)
 {
-       int ret;
-
+       int ret, i;
+       u32 sectorsize, nodesize;
+       u32 test_sectorsize[] = {
+               PAGE_SIZE,
+       };
        ret = btrfs_init_test_fs();
        if (ret)
                return ret;
-
-       ret = btrfs_test_free_space_cache();
-       if (ret)
-               goto out;
-       ret = btrfs_test_extent_buffer_operations();
-       if (ret)
-               goto out;
-       ret = btrfs_test_extent_io();
-       if (ret)
-               goto out;
-       ret = btrfs_test_inodes();
-       if (ret)
-               goto out;
-       ret = btrfs_test_qgroups();
-       if (ret)
-               goto out;
-       ret = btrfs_test_free_space_tree();
+       for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
+               sectorsize = test_sectorsize[i];
+               for (nodesize = sectorsize;
+                    nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
+                    nodesize <<= 1) {
+                       pr_info("BTRFS: selftest: sectorsize: %u  nodesize: %u\n",
+                               sectorsize, nodesize);
+                       ret = btrfs_test_free_space_cache(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_extent_buffer_operations(sectorsize,
+                               nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_extent_io(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_inodes(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_qgroups(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_free_space_tree(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+               }
+       }
 out:
        btrfs_destroy_test_fs();
        return ret;
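A worked example of what the new test matrix expands to: with 4K pages, test_sectorsize[] holds the single value 4096 and nodesize doubles from sectorsize up to BTRFS_MAX_METADATA_BLOCKSIZE (64K), so the whole suite runs five times (4K, 8K, 16K, 32K and 64K nodes); on ppc64 with 64K pages it collapses to a single 64K/64K pass. A standalone user-space sketch of the enumeration, assuming those two constants:

#include <stdio.h>

int main(void)
{
        unsigned int sectorsize = 4096, nodesize;       /* assumes 4K pages */

        /* 65536 stands in for BTRFS_MAX_METADATA_BLOCKSIZE */
        for (nodesize = sectorsize; nodesize <= 65536; nodesize <<= 1)
                printf("run suite: sectorsize=%u nodesize=%u\n",
                       sectorsize, nodesize);
        return 0;
}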
index f54bf450bad3e8e5fa9de2d4a0a439cff295b1f4..10eb249ef891ce88a24df90cd9afecdcc5d1dc75 100644
@@ -175,7 +175,7 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
 }
 
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length)
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
 {
        struct btrfs_block_group_cache *cache;
 
@@ -192,8 +192,8 @@ btrfs_alloc_dummy_block_group(unsigned long length)
        cache->key.objectid = 0;
        cache->key.offset = length;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
-       cache->sectorsize = 4096;
-       cache->full_stripe_len = 4096;
+       cache->sectorsize = sectorsize;
+       cache->full_stripe_len = sectorsize;
 
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
index 054b8c73c951e4e50be759ea4b8cb322d80e9041..66fb6b701eb72975fa203fd4018ae847cffd46ab 100644
 struct btrfs_root;
 struct btrfs_trans_handle;
 
-int btrfs_test_free_space_cache(void);
-int btrfs_test_extent_buffer_operations(void);
-int btrfs_test_extent_io(void);
-int btrfs_test_inodes(void);
-int btrfs_test_qgroups(void);
-int btrfs_test_free_space_tree(void);
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
 int btrfs_init_test_fs(void);
 void btrfs_destroy_test_fs(void);
 struct inode *btrfs_new_test_inode(void);
 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
 void btrfs_free_dummy_root(struct btrfs_root *root);
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length);
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
 void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
 #else
-static inline int btrfs_test_free_space_cache(void)
+static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_extent_buffer_operations(void)
+static inline int btrfs_test_extent_buffer_operations(u32 sectorsize,
+       u32 nodesize)
 {
        return 0;
 }
@@ -57,19 +58,19 @@ static inline int btrfs_init_test_fs(void)
 static inline void btrfs_destroy_test_fs(void)
 {
 }
-static inline int btrfs_test_extent_io(void)
+static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_inodes(void)
+static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_qgroups(void)
+static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_free_space_tree(void)
+static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
index f51963a8f929e97d8030ca1a1e228263094cf244..4f8cbd1ec5ee340d862a8f8c415ce828ebd1a9c8 100644
@@ -22,7 +22,7 @@
 #include "../extent_io.h"
 #include "../disk-io.h"
 
-static int test_btrfs_split_item(void)
+static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_path *path;
        struct btrfs_root *root;
@@ -40,7 +40,7 @@ static int test_btrfs_split_item(void)
 
        test_msg("Running btrfs_split_item tests\n");
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Could not allocate root\n");
                return PTR_ERR(root);
@@ -53,7 +53,8 @@ static int test_btrfs_split_item(void)
                return -ENOMEM;
        }
 
-       path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096);
+       path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
+                                                       nodesize);
        if (!eb) {
                test_msg("Could not allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -222,8 +223,8 @@ out:
        return ret;
 }
 
-int btrfs_test_extent_buffer_operations(void)
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize)
 {
-       test_msg("Running extent buffer operation tests");
-       return test_btrfs_split_item();
+       test_msg("Running extent buffer operation tests\n");
+       return test_btrfs_split_item(sectorsize, nodesize);
 }
index 55724607f79b8eea00090352d7038885001f7843..d19ab0317283ca728963700c93cff334819eaf16 100644
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/sizes.h>
 #include "btrfs-tests.h"
+#include "../ctree.h"
 #include "../extent_io.h"
 
 #define PROCESS_UNLOCK         (1 << 0)
@@ -65,7 +66,7 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
        return count;
 }
 
-static int test_find_delalloc(void)
+static int test_find_delalloc(u32 sectorsize)
 {
        struct inode *inode;
        struct extent_io_tree tmp;
@@ -113,7 +114,7 @@ static int test_find_delalloc(void)
         * |--- delalloc ---|
         * |---  search  ---|
         */
-       set_extent_delalloc(&tmp, 0, 4095, NULL);
+       set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
        start = 0;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -122,9 +123,9 @@ static int test_find_delalloc(void)
                test_msg("Should have found at least one delalloc\n");
                goto out_bits;
        }
-       if (start != 0 || end != 4095) {
-               test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
-                        start, end);
+       if (start != 0 || end != (sectorsize - 1)) {
+               test_msg("Expected start 0 end %u, got start %llu end %llu\n",
+                       sectorsize - 1, start, end);
                goto out_bits;
        }
        unlock_extent(&tmp, start, end);
@@ -144,7 +145,7 @@ static int test_find_delalloc(void)
                test_msg("Couldn't find the locked page\n");
                goto out_bits;
        }
-       set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
+       set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -172,7 +173,7 @@ static int test_find_delalloc(void)
         * |--- delalloc ---|
         *                    |--- search ---|
         */
-       test_start = max_bytes + 4096;
+       test_start = max_bytes + sectorsize;
        locked_page = find_lock_page(inode->i_mapping, test_start >>
                                     PAGE_SHIFT);
        if (!locked_page) {
@@ -272,6 +273,16 @@ out:
        return ret;
 }
 
+/**
+ * test_bit_in_byte - Determine whether a bit is set in a byte
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit_in_byte(int nr, const u8 *addr)
+{
+       return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
+}
+
 static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
                             unsigned long len)
 {
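The byte-granular helper above exists, presumably, because the stock test_bit() operates on unsigned long words, whose in-memory bit numbering differs between little- and big-endian hosts, whereas extent-buffer bitmaps are defined byte-wise. A small usage sketch:

/*
 * Usage sketch: bit 11 in byte-wise numbering lives in byte 1, bit 3
 * (mask 0x08), independent of host endianness or sizeof(long).
 */
u8 buf[2] = { 0x00, 0x08 };
int set = test_bit_in_byte(11, buf);    /* -> 1 on any host */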
@@ -298,25 +309,29 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
                return -EINVAL;
        }
 
-       bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
-                  sizeof(long) * BITS_PER_BYTE);
-       extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
-                                sizeof(long) * BITS_PER_BYTE);
-       if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
-               test_msg("Setting straddling pages failed\n");
-               return -EINVAL;
-       }
+       /* Straddling pages test */
+       if (len > PAGE_SIZE) {
+               bitmap_set(bitmap,
+                       (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+                       sizeof(long) * BITS_PER_BYTE);
+               extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+                                       sizeof(long) * BITS_PER_BYTE);
+               if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+                       test_msg("Setting straddling pages failed\n");
+                       return -EINVAL;
+               }
 
-       bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
-       bitmap_clear(bitmap,
-                    (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
-                    sizeof(long) * BITS_PER_BYTE);
-       extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
-       extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
-                                  sizeof(long) * BITS_PER_BYTE);
-       if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
-               test_msg("Clearing straddling pages failed\n");
-               return -EINVAL;
+               bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
+               bitmap_clear(bitmap,
+                       (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+                       sizeof(long) * BITS_PER_BYTE);
+               extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
+               extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+                                       sizeof(long) * BITS_PER_BYTE);
+               if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+                       test_msg("Clearing straddling pages failed\n");
+                       return -EINVAL;
+               }
        }
 
        /*
@@ -333,7 +348,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
        for (i = 0; i < len * BITS_PER_BYTE; i++) {
                int bit, bit1;
 
-               bit = !!test_bit(i, bitmap);
+               bit = !!test_bit_in_byte(i, (u8 *)bitmap);
                bit1 = !!extent_buffer_test_bit(eb, 0, i);
                if (bit1 != bit) {
                        test_msg("Testing bit pattern failed\n");
@@ -351,15 +366,22 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
        return 0;
 }
 
-static int test_eb_bitmaps(void)
+static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
 {
-       unsigned long len = PAGE_SIZE * 4;
+       unsigned long len;
        unsigned long *bitmap;
        struct extent_buffer *eb;
        int ret;
 
        test_msg("Running extent buffer bitmap tests\n");
 
+       /*
+        * On ppc64, sectorsize can be 64K, thus 4 * 64K would be larger than
+        * BTRFS_MAX_METADATA_BLOCKSIZE.
+        */
+       len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
+               ? sectorsize * 4 : sectorsize;
+
        bitmap = kmalloc(len, GFP_KERNEL);
        if (!bitmap) {
                test_msg("Couldn't allocate test bitmap\n");
@@ -379,7 +401,7 @@ static int test_eb_bitmaps(void)
 
        /* Do it over again with an extent buffer which isn't page-aligned. */
        free_extent_buffer(eb);
-       eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
+       eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
        if (!eb) {
                test_msg("Couldn't allocate test extent buffer\n");
                kfree(bitmap);
@@ -393,17 +415,17 @@ out:
        return ret;
 }
 
-int btrfs_test_extent_io(void)
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
 {
        int ret;
 
        test_msg("Running extent I/O tests\n");
 
-       ret = test_find_delalloc();
+       ret = test_find_delalloc(sectorsize);
        if (ret)
                goto out;
 
-       ret = test_eb_bitmaps();
+       ret = test_eb_bitmaps(sectorsize, nodesize);
 out:
        test_msg("Extent I/O tests finished\n");
        return ret;
index 0eeb8f3d6b674ba8f67fe54877afde605999fcf3..3956bb2ff84cab59bf25fc9869cebc83d8606621 100644
@@ -22,7 +22,7 @@
 #include "../disk-io.h"
 #include "../free-space-cache.h"
 
-#define BITS_PER_BITMAP                (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
 
 /*
  * This test just does basic sanity checking, making sure we can add an extent
@@ -99,7 +99,8 @@ static int test_extents(struct btrfs_block_group_cache *cache)
        return 0;
 }
 
-static int test_bitmaps(struct btrfs_block_group_cache *cache)
+static int test_bitmaps(struct btrfs_block_group_cache *cache,
+                       u32 sectorsize)
 {
        u64 next_bitmap_offset;
        int ret;
@@ -139,7 +140,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
         * The first bitmap we have starts at offset 0 so the next one is just
         * at the end of the first bitmap.
         */
-       next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+       next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
 
        /* Test a bit straddling two bitmaps */
        ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
@@ -167,9 +168,10 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
 }
 
 /* This is the high grade jackassery */
-static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
+static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
+                                   u32 sectorsize)
 {
-       u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+       u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
        int ret;
 
        test_msg("Running bitmap and extent tests\n");
@@ -401,7 +403,8 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
  * requests.
  */
 static int
-test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
+                                      u32 sectorsize)
 {
        int ret;
        u64 offset;
@@ -539,7 +542,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
         * The goal is to test that the bitmap entry space stealing doesn't
         * steal this space region.
         */
-       ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096);
+       ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize);
        if (ret) {
                test_msg("Error adding free space: %d\n", ret);
                return ret;
@@ -597,8 +600,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -ENOENT;
        }
 
-       if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) {
-               test_msg("Cache free space is not 1Mb + 4Kb\n");
+       if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) {
+               test_msg("Cache free space is not 1Mb + %u\n", sectorsize);
                return -EINVAL;
        }
 
@@ -611,22 +614,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -EINVAL;
        }
 
-       /* All that remains is a 4Kb free space region in a bitmap. Confirm. */
+       /*
+        * All that remains is a sectorsize free space region in a bitmap.
+        * Confirm.
+        */
        ret = check_num_extents_and_bitmaps(cache, 1, 1);
        if (ret)
                return ret;
 
-       if (cache->free_space_ctl->free_space != 4096) {
-               test_msg("Cache free space is not 4Kb\n");
+       if (cache->free_space_ctl->free_space != sectorsize) {
+               test_msg("Cache free space is not %u\n", sectorsize);
                return -EINVAL;
        }
 
        offset = btrfs_find_space_for_alloc(cache,
-                                           0, 4096, 0,
+                                           0, sectorsize, 0,
                                            &max_extent_size);
        if (offset != (SZ_128M + SZ_16M)) {
-               test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n",
-                        offset);
+               test_msg("Failed to allocate %u, returned offset : %llu\n",
+                        sectorsize, offset);
                return -EINVAL;
        }
 
@@ -733,7 +739,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
         * The goal is to test that the bitmap entry space stealing doesn't
         * steal this space region.
         */
-       ret = btrfs_add_free_space(cache, SZ_32M, 8192);
+       ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize);
        if (ret) {
                test_msg("Error adding free space: %d\n", ret);
                return ret;
@@ -757,7 +763,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 
        /*
         * Confirm that our extent entry didn't steal all free space from the
-        * bitmap, because of the small 8Kb free space region.
+        * bitmap, because of the small 2 * sectorsize free space region.
         */
        ret = check_num_extents_and_bitmaps(cache, 2, 1);
        if (ret)
@@ -783,8 +789,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -ENOENT;
        }
 
-       if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) {
-               test_msg("Cache free space is not 1Mb + 8Kb\n");
+       if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) {
+               test_msg("Cache free space is not 1Mb + %u\n", 2 * sectorsize);
                return -EINVAL;
        }
 
@@ -796,21 +802,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -EINVAL;
        }
 
-       /* All that remains is a 8Kb free space region in a bitmap. Confirm. */
+       /*
+        * All that remains is 2 * sectorsize free space region
+        * in a bitmap. Confirm.
+        */
        ret = check_num_extents_and_bitmaps(cache, 1, 1);
        if (ret)
                return ret;
 
-       if (cache->free_space_ctl->free_space != 8192) {
-               test_msg("Cache free space is not 8Kb\n");
+       if (cache->free_space_ctl->free_space != 2 * sectorsize) {
+               test_msg("Cache free space is not %u\n", 2 * sectorsize);
                return -EINVAL;
        }
 
        offset = btrfs_find_space_for_alloc(cache,
-                                           0, 8192, 0,
+                                           0, 2 * sectorsize, 0,
                                            &max_extent_size);
        if (offset != SZ_32M) {
-               test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n",
+               test_msg("Failed to allocate %u, offset: %llu\n",
+                        2 * sectorsize,
                         offset);
                return -EINVAL;
        }
@@ -825,7 +835,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
        return 0;
 }
 
-int btrfs_test_free_space_cache(void)
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_block_group_cache *cache;
        struct btrfs_root *root = NULL;
@@ -833,13 +843,19 @@ int btrfs_test_free_space_cache(void)
 
        test_msg("Running btrfs free space cache tests\n");
 
-       cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024);
+       /*
+        * For ppc64 (with 64k page size), bytes per bitmap might be
+        * larger than 1G.  To make the bitmap test available on ppc64,
+        * alloc a dummy block group whose size crosses a bitmap boundary.
+        */
+       cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
+                                       + PAGE_SIZE, sectorsize);
        if (!cache) {
                test_msg("Couldn't run the tests\n");
                return 0;
        }
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                ret = PTR_ERR(root);
                goto out;
@@ -855,14 +871,14 @@ int btrfs_test_free_space_cache(void)
        ret = test_extents(cache);
        if (ret)
                goto out;
-       ret = test_bitmaps(cache);
+       ret = test_bitmaps(cache, sectorsize);
        if (ret)
                goto out;
-       ret = test_bitmaps_and_extents(cache);
+       ret = test_bitmaps_and_extents(cache, sectorsize);
        if (ret)
                goto out;
 
-       ret = test_steal_space_from_bitmap_to_extent(cache);
+       ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
 out:
        btrfs_free_dummy_block_group(cache);
        btrfs_free_dummy_root(root);
index 7cea4462acd5f8d2ecf671be0c398dd2d6d62a98..aac507085ab0958f89a1ce81f0dc69d3824c2a6c 100644
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/types.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
 #include "../disk-io.h"
@@ -30,7 +31,7 @@ struct free_space_extent {
  * The test cases align their operations to this in order to hit some of the
  * edge cases in the bitmap code.
  */
-#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096)
+#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE)
 
 static int __check_free_space_extents(struct btrfs_trans_handle *trans,
                                      struct btrfs_fs_info *fs_info,
@@ -439,7 +440,8 @@ typedef int (*test_func_t)(struct btrfs_trans_handle *,
                           struct btrfs_block_group_cache *,
                           struct btrfs_path *);
 
-static int run_test(test_func_t test_func, int bitmaps)
+static int run_test(test_func_t test_func, int bitmaps,
+               u32 sectorsize, u32 nodesize)
 {
        struct btrfs_root *root = NULL;
        struct btrfs_block_group_cache *cache = NULL;
@@ -447,7 +449,7 @@ static int run_test(test_func_t test_func, int bitmaps)
        struct btrfs_path *path = NULL;
        int ret;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate dummy root\n");
                ret = PTR_ERR(root);
@@ -466,7 +468,8 @@ static int run_test(test_func_t test_func, int bitmaps)
        root->fs_info->free_space_root = root;
        root->fs_info->tree_root = root;
 
-       root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+       root->node = alloc_test_extent_buffer(root->fs_info,
+               nodesize, nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -474,9 +477,9 @@ static int run_test(test_func_t test_func, int bitmaps)
        }
        btrfs_set_header_level(root->node, 0);
        btrfs_set_header_nritems(root->node, 0);
-       root->alloc_bytenr += 8192;
+       root->alloc_bytenr += 2 * nodesize;
 
-       cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE);
+       cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize);
        if (!cache) {
                test_msg("Couldn't allocate dummy block group cache\n");
                ret = -ENOMEM;
@@ -534,17 +537,18 @@ out:
        return ret;
 }
 
-static int run_test_both_formats(test_func_t test_func)
+static int run_test_both_formats(test_func_t test_func,
+       u32 sectorsize, u32 nodesize)
 {
        int ret;
 
-       ret = run_test(test_func, 0);
+       ret = run_test(test_func, 0, sectorsize, nodesize);
        if (ret)
                return ret;
-       return run_test(test_func, 1);
+       return run_test(test_func, 1, sectorsize, nodesize);
 }
 
-int btrfs_test_free_space_tree(void)
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
 {
        test_func_t tests[] = {
                test_empty_block_group,
@@ -561,9 +565,11 @@ int btrfs_test_free_space_tree(void)
 
        test_msg("Running free space tree tests\n");
        for (i = 0; i < ARRAY_SIZE(tests); i++) {
-               int ret = run_test_both_formats(tests[i]);
+               int ret = run_test_both_formats(tests[i], sectorsize,
+                       nodesize);
                if (ret) {
-                       test_msg("%pf failed\n", tests[i]);
+                       test_msg("%pf : sectorsize %u failed\n",
+                               tests[i], sectorsize);
                        return ret;
                }
        }
index 8a25fe8b7c452e78d823da897068748ae280de32..29648c0a39f1bc380eaffb08f0d574098dc9d343 100644
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/types.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
 #include "../btrfs_inode.h"
@@ -86,19 +87,19 @@ static void insert_inode_item_key(struct btrfs_root *root)
  * diagram of how the extents will look though this may not be possible we still
  * want to make sure everything acts normally (the last number is not inclusive)
  *
- * [0 - 5][5 -  6][6 - 10][10 - 4096][  4096 - 8192 ][8192 - 12288]
- * [hole ][inline][ hole ][ regular ][regular1 split][    hole    ]
+ * [0 - 5][5 -  6][     6 - 4096     ][ 4096 - 4100][4100 - 8195][8195 - 12291]
+ * [hole ][inline][hole but no extent][  hole   ][   regular ][regular1 split]
  *
- * [ 12288 - 20480][20480 - 24576][  24576 - 28672  ][28672 - 36864][36864 - 45056]
- * [regular1 split][   prealloc1 ][prealloc1 written][   prealloc1 ][ compressed  ]
+ * [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ]
+ * [    hole    ][regular1 split][   prealloc ][   prealloc1  ][prealloc1 written]
  *
- * [45056 - 49152][49152-53248][53248-61440][61440-65536][     65536+81920   ]
- * [ compressed1 ][  regular  ][compressed1][  regular  ][ hole but no extent]
+ * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443- 69635]
+ * [  prealloc1  ][ compressed  ][ compressed1 ][    regular  ][ compressed1]
  *
- * [81920-86016]
- * [  regular  ]
+ * [69635-73731][   73731 - 86019   ][86019-90115]
+ * [  regular  ][ hole but no extent][  regular  ]
  */
-static void setup_file_extents(struct btrfs_root *root)
+static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
 {
        int slot = 0;
        u64 disk_bytenr = SZ_1M;
@@ -119,7 +120,7 @@ static void setup_file_extents(struct btrfs_root *root)
        insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
                      slot);
        slot++;
-       offset = 4096;
+       offset = sectorsize;
 
        /* Now another hole */
        insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
@@ -128,99 +129,106 @@ static void setup_file_extents(struct btrfs_root *root)
        offset += 4;
 
        /* Now for a regular extent */
-       insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
+                     disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       disk_bytenr += 4096;
-       offset += 4095;
+       disk_bytenr += sectorsize;
+       offset += sectorsize - 1;
 
        /*
         * Now for 3 extents that were split from a hole punch so we test
         * offsets properly.
         */
-       insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+                     4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG,
-                     0, slot);
+       offset += sectorsize;
+       insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0,
+                     BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+       offset += sectorsize;
+       insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+                     2 * sectorsize, disk_bytenr, 4 * sectorsize,
                      BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 8192;
-       disk_bytenr += 16384;
+       offset += 2 * sectorsize;
+       disk_bytenr += 4 * sectorsize;
 
        /* Now for an unwritten prealloc extent */
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+       insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+               sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
        slot++;
-       offset += 4096;
+       offset += sectorsize;
 
        /*
         * We want to jack up disk_bytenr a little more so the em stuff doesn't
         * merge our records.
         */
-       disk_bytenr += 8192;
+       disk_bytenr += 2 * sectorsize;
 
        /*
         * Now for a partially written prealloc extent, basically the same as
         * the hole punch example above.  Ram_bytes never changes when you mark
         * extents written btw.
         */
-       insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
-                     BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+                     4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       offset += sectorsize;
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize,
+                     disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0,
+                     slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+       offset += sectorsize;
+       insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+                     2 * sectorsize, disk_bytenr, 4 * sectorsize,
                      BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
        slot++;
-       offset += 8192;
-       disk_bytenr += 16384;
+       offset += 2 * sectorsize;
+       disk_bytenr += 4 * sectorsize;
 
        /* Now a normal compressed extent */
-       insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+       insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0,
+                     disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG,
+                     BTRFS_COMPRESS_ZLIB, slot);
        slot++;
-       offset += 8192;
+       offset += 2 * sectorsize;
        /* No merges */
-       disk_bytenr += 8192;
+       disk_bytenr += 2 * sectorsize;
 
        /* Now a split compressed extent */
-       insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+                     sectorsize, BTRFS_FILE_EXTENT_REG,
+                     BTRFS_COMPRESS_ZLIB, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096,
+       offset += sectorsize;
+       insert_extent(root, offset, sectorsize, sectorsize, 0,
+                     disk_bytenr + sectorsize, sectorsize,
                      BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096,
+       offset += sectorsize;
+       insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+                     2 * sectorsize, disk_bytenr, sectorsize,
                      BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
        slot++;
-       offset += 8192;
-       disk_bytenr += 8192;
+       offset += 2 * sectorsize;
+       disk_bytenr += 2 * sectorsize;
 
        /* Now extents that have a hole but no hole extent */
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+                     sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 16384;
-       disk_bytenr += 4096;
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       offset += 4 * sectorsize;
+       disk_bytenr += sectorsize;
+       insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+                     sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
 }
 
 static unsigned long prealloc_only = 0;
 static unsigned long compressed_only = 0;
 static unsigned long vacancy_only = 0;
 
-static noinline int test_btrfs_get_extent(void)
+static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
 {
        struct inode *inode = NULL;
        struct btrfs_root *root = NULL;
@@ -240,7 +248,7 @@ static noinline int test_btrfs_get_extent(void)
        BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
        BTRFS_I(inode)->location.offset = 0;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                goto out;
@@ -256,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(NULL, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
@@ -273,7 +281,7 @@ static noinline int test_btrfs_get_extent(void)
 
        /* First with no extents */
        BTRFS_I(inode)->root = root;
-       em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0);
        if (IS_ERR(em)) {
                em = NULL;
                test_msg("Got an error when we shouldn't have\n");
@@ -295,7 +303,7 @@ static noinline int test_btrfs_get_extent(void)
         * setup_file_extents, so if you change anything there you need to
         * update the comment and update the expected values below.
         */
-       setup_file_extents(root);
+       setup_file_extents(root, sectorsize);
 
        em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
        if (IS_ERR(em)) {
@@ -318,7 +326,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -327,7 +335,8 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected an inline, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4091) {
+
+       if (em->start != offset || em->len != (sectorsize - 5)) {
                test_msg("Unexpected extent wanted start %llu len 1, got start "
                         "%llu len %llu\n", offset, em->start, em->len);
                goto out;
@@ -344,7 +353,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -366,7 +375,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Regular extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -375,7 +384,7 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4095) {
+       if (em->start != offset || em->len != sectorsize - 1) {
                test_msg("Unexpected extent wanted start %llu len 4095, got "
                         "start %llu len %llu\n", offset, em->start, em->len);
                goto out;
@@ -393,7 +402,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* The next 3 are split extents */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -402,9 +411,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -421,7 +431,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -430,9 +440,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a hole, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -442,7 +453,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -451,9 +462,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -475,7 +487,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Prealloc extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -484,9 +496,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != prealloc_only) {
@@ -503,7 +516,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* The next 3 are a half written prealloc extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -512,9 +525,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != prealloc_only) {
@@ -532,7 +546,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -541,9 +555,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -564,7 +579,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -573,9 +588,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != prealloc_only) {
@@ -598,7 +614,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Now for the compressed extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -607,9 +623,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u,"
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != compressed_only) {
@@ -631,7 +648,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Split compressed extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -640,9 +657,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u,"
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != compressed_only) {
@@ -665,7 +683,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -674,9 +692,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -691,7 +710,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -701,9 +720,10 @@ static noinline int test_btrfs_get_extent(void)
                         disk_bytenr, em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != compressed_only) {
@@ -725,7 +745,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* A hole between regular extents but no hole extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -734,9 +754,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -765,9 +786,10 @@ static noinline int test_btrfs_get_extent(void)
         * length of the actual hole, if this changes we'll have to change this
         * test.
         */
-       if (em->start != offset || em->len != 12288) {
-               test_msg("Unexpected extent wanted start %llu len 12288, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 3 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 3 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != vacancy_only) {
@@ -783,7 +805,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -792,9 +814,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u,"
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -815,7 +838,7 @@ out:
        return ret;
 }
 
-static int test_hole_first(void)
+static int test_hole_first(u32 sectorsize, u32 nodesize)
 {
        struct inode *inode = NULL;
        struct btrfs_root *root = NULL;
@@ -832,7 +855,7 @@ static int test_hole_first(void)
        BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
        BTRFS_I(inode)->location.offset = 0;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                goto out;
@@ -844,7 +867,7 @@ static int test_hole_first(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(NULL, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
@@ -861,9 +884,9 @@ static int test_hole_first(void)
         * btrfs_get_extent.
         */
        insert_inode_item_key(root);
-       insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, 1);
-       em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0);
+       insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
+                     sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
+       em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -872,9 +895,10 @@ static int test_hole_first(void)
                test_msg("Expected a hole, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != 0 || em->len != 4096) {
-               test_msg("Unexpected extent wanted start 0 len 4096, got start "
-                        "%llu len %llu\n", em->start, em->len);
+       if (em->start != 0 || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start 0 len %u, "
+                       "got start %llu len %llu\n",
+                       sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != vacancy_only) {
@@ -884,18 +908,19 @@ static int test_hole_first(void)
        }
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0);
+       em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
        }
-       if (em->block_start != 4096) {
+       if (em->block_start != sectorsize) {
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != 4096 || em->len != 4096) {
-               test_msg("Unexpected extent wanted start 4096 len 4096, got "
-                        "start %llu len %llu\n", em->start, em->len);
+       if (em->start != sectorsize || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %u len %u, "
+                       "got start %llu len %llu\n",
+                       sectorsize, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -912,7 +937,7 @@ out:
        return ret;
 }
 
-static int test_extent_accounting(void)
+static int test_extent_accounting(u32 sectorsize, u32 nodesize)
 {
        struct inode *inode = NULL;
        struct btrfs_root *root = NULL;
@@ -924,7 +949,7 @@ static int test_extent_accounting(void)
                return ret;
        }
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                goto out;
@@ -954,10 +979,11 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE][4k] */
+       /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
        BTRFS_I(inode)->outstanding_extents++;
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
-                                       BTRFS_MAX_EXTENT_SIZE + 4095, NULL);
+                                       BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
+                                       NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -969,10 +995,10 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */
+       /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
        ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
                               BTRFS_MAX_EXTENT_SIZE >> 1,
-                              (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+                              (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
                               EXTENT_DELALLOC | EXTENT_DIRTY |
                               EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
                               NULL, GFP_KERNEL);
@@ -987,10 +1013,11 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE][4K] */
+       /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
        BTRFS_I(inode)->outstanding_extents++;
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
-                                       (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+                                       (BTRFS_MAX_EXTENT_SIZE >> 1)
+                                       + sectorsize - 1,
                                        NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
@@ -1004,16 +1031,17 @@ static int test_extent_accounting(void)
        }
 
        /*
-        * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K]
+        * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize]
         *
         * I'm artificially adding 2 to outstanding_extents because in the
         * buffered IO case we'd add things up as we go, but I don't feel like
         * doing that here, this isn't the interesting case we want to test.
         */
        BTRFS_I(inode)->outstanding_extents += 2;
-       ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192,
-                                       (BTRFS_MAX_EXTENT_SIZE << 1) + 12287,
-                                       NULL);
+       ret = btrfs_set_extent_delalloc(inode,
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
+                       (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
+                       NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1025,10 +1053,13 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */
+       /*
+        * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize]
+        */
        BTRFS_I(inode)->outstanding_extents++;
-       ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
-                                       BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+       ret = btrfs_set_extent_delalloc(inode,
+                       BTRFS_MAX_EXTENT_SIZE + sectorsize,
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1042,8 +1073,8 @@ static int test_extent_accounting(void)
 
        /* [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize] */
        ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
-                              BTRFS_MAX_EXTENT_SIZE+4096,
-                              BTRFS_MAX_EXTENT_SIZE+8191,
+                              BTRFS_MAX_EXTENT_SIZE + sectorsize,
+                              BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
                               EXTENT_DIRTY | EXTENT_DELALLOC |
                               EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
                               NULL, GFP_KERNEL);
@@ -1063,8 +1094,9 @@ static int test_extent_accounting(void)
         * might fail and I'd rather satisfy my paranoia at this point.
         */
        BTRFS_I(inode)->outstanding_extents++;
-       ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
-                                       BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+       ret = btrfs_set_extent_delalloc(inode,
+                       BTRFS_MAX_EXTENT_SIZE + sectorsize,
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1103,7 +1135,7 @@ out:
        return ret;
 }
 
-int btrfs_test_inodes(void)
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
 {
        int ret;
 
@@ -1112,13 +1144,13 @@ int btrfs_test_inodes(void)
        set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
 
        test_msg("Running btrfs_get_extent tests\n");
-       ret = test_btrfs_get_extent();
+       ret = test_btrfs_get_extent(sectorsize, nodesize);
        if (ret)
                return ret;
        test_msg("Running hole first btrfs_get_extent test\n");
-       ret = test_hole_first();
+       ret = test_hole_first(sectorsize, nodesize);
        if (ret)
                return ret;
        test_msg("Running outstanding_extents tests\n");
-       return test_extent_accounting();
+       return test_extent_accounting(sectorsize, nodesize);
 }
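
With every entry point now parameterized, a test runner can sweep the supported
sector sizes instead of hard-coding 4k. A minimal sketch of such a driver
follows; the array contents, the nodesize default, and the
btrfs_run_sanity_tests() name are illustrative assumptions, not part of this
diff.

/* Illustrative driver only; names and values here are assumed. */
static const u32 test_sectorsizes[] = { 4096, 8192, 16384, 65536 };

int btrfs_run_sanity_tests(void)
{
        int i, ret;
        u32 nodesize = 16384;   /* assumed default btree node size */

        for (i = 0; i < ARRAY_SIZE(test_sectorsizes); i++) {
                ret = btrfs_test_inodes(test_sectorsizes[i], nodesize);
                if (ret)
                        return ret;
                ret = btrfs_test_qgroups(test_sectorsizes[i], nodesize);
                if (ret)
                        return ret;
        }
        return 0;
}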
index 8aa4ded31326c95e8b19b4ced8f9c9fab1c4c8de..57a12c0d680ba300461ff5ee9714833b541f2810 100644 (file)
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/types.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
 #include "../transaction.h"
@@ -216,7 +217,8 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
        return ret;
 }
 
-static int test_no_shared_qgroup(struct btrfs_root *root)
+static int test_no_shared_qgroup(struct btrfs_root *root,
+               u32 sectorsize, u32 nodesize)
 {
        struct btrfs_trans_handle trans;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -227,7 +229,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
        btrfs_init_dummy_trans(&trans);
 
        test_msg("Qgroup basic add\n");
-       ret = btrfs_create_qgroup(NULL, fs_info, 5);
+       ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
        if (ret) {
                test_msg("Couldn't create a qgroup %d\n", ret);
                return ret;
@@ -238,18 +240,19 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
         * we can only call btrfs_qgroup_account_extent() directly to test
         * quota.
         */
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+       ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FS_TREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -257,32 +260,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                               nodesize, nodesize)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
        old_roots = NULL;
        new_roots = NULL;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = remove_extent_item(root, 4096, 4096);
+       ret = remove_extent_item(root, nodesize, nodesize);
        if (ret)
                return -EINVAL;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -290,14 +294,14 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return -EINVAL;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
@@ -310,7 +314,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
  * right, also remove one of the roots and make sure the exclusive count is
  * adjusted properly.
  */
-static int test_multiple_refs(struct btrfs_root *root)
+static int test_multiple_refs(struct btrfs_root *root,
+               u32 sectorsize, u32 nodesize)
 {
        struct btrfs_trans_handle trans;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -322,25 +327,29 @@ static int test_multiple_refs(struct btrfs_root *root)
 
        test_msg("Qgroup multiple refs test\n");
 
-       /* We have 5 created already from the previous test */
-       ret = btrfs_create_qgroup(NULL, fs_info, 256);
+       /*
+        * We have BTRFS_FS_TREE_OBJECTID created already from the
+        * previous test.
+        */
+       ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
        if (ret) {
                test_msg("Couldn't create a qgroup %d\n", ret);
                return ret;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+       ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FS_TREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -348,30 +357,32 @@ static int test_multiple_refs(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                                      nodesize, nodesize)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = add_tree_ref(root, 4096, 4096, 0, 256);
+       ret = add_tree_ref(root, nodesize, nodesize, 0,
+                       BTRFS_FIRST_FREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -379,35 +390,38 @@ static int test_multiple_refs(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                                       nodesize, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+                                       nodesize, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = remove_extent_ref(root, 4096, 4096, 0, 256);
+       ret = remove_extent_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FIRST_FREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -415,19 +429,21 @@ static int test_multiple_refs(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+                                       0, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                                       nodesize, nodesize)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
@@ -435,13 +451,13 @@ static int test_multiple_refs(struct btrfs_root *root)
        return 0;
 }
 
-int btrfs_test_qgroups(void)
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_root *root;
        struct btrfs_root *tmp_root;
        int ret = 0;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                return PTR_ERR(root);
@@ -468,7 +484,8 @@ int btrfs_test_qgroups(void)
         * Can't use bytenr 0, some things freak out
         * *cough*backref walking code*cough*
         */
-       root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+       root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
+                                       nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -476,16 +493,16 @@ int btrfs_test_qgroups(void)
        }
        btrfs_set_header_level(root->node, 0);
        btrfs_set_header_nritems(root->node, 0);
-       root->alloc_bytenr += 8192;
+       root->alloc_bytenr += 2 * nodesize;
 
-       tmp_root = btrfs_alloc_dummy_root();
+       tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(tmp_root)) {
                test_msg("Couldn't allocate a fs root\n");
                ret = PTR_ERR(tmp_root);
                goto out;
        }
 
-       tmp_root->root_key.objectid = 5;
+       tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID;
        root->fs_info->fs_root = tmp_root;
        ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
        if (ret) {
@@ -493,14 +510,14 @@ int btrfs_test_qgroups(void)
                goto out;
        }
 
-       tmp_root = btrfs_alloc_dummy_root();
+       tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(tmp_root)) {
                test_msg("Couldn't allocate a fs root\n");
                ret = PTR_ERR(tmp_root);
                goto out;
        }
 
-       tmp_root->root_key.objectid = 256;
+       tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID;
        ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
        if (ret) {
                test_msg("Couldn't insert fs root %d\n", ret);
@@ -508,10 +525,10 @@ int btrfs_test_qgroups(void)
        }
 
        test_msg("Running qgroup tests\n");
-       ret = test_no_shared_qgroup(root);
+       ret = test_no_shared_qgroup(root, sectorsize, nodesize);
        if (ret)
                goto out;
-       ret = test_multiple_refs(root);
+       ret = test_multiple_refs(root, sectorsize, nodesize);
 out:
        btrfs_free_dummy_root(root);
        return ret;
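
For readers decoding the constant swap above: the magic IDs 5 and 256 that the
qgroup tests used to hard-code are exactly the values behind the named
objectids, so behavior is unchanged and only readability improves. For
reference, from ctree.h:

#define BTRFS_FS_TREE_OBJECTID 5ULL             /* the default subvolume tree */
#define BTRFS_FIRST_FREE_OBJECTID 256ULL        /* first objectid available to users */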
index da9e0036a864d3d8cf45b164b0fb083675223584..548faaa9e1693d8cad721dcb02c589f5e2f811a5 100644 (file)
@@ -4241,6 +4241,7 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
        if (IS_ERR(uuid_root)) {
                ret = PTR_ERR(uuid_root);
                btrfs_abort_transaction(trans, tree_root, ret);
+               btrfs_end_transaction(trans, tree_root);
                return ret;
        }
 
@@ -6258,27 +6259,23 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
        return dev;
 }
 
-static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
-                         struct extent_buffer *leaf,
-                         struct btrfs_chunk *chunk)
+/* Return -EIO if any error, otherwise return 0. */
+static int btrfs_check_chunk_valid(struct btrfs_root *root,
+                                  struct extent_buffer *leaf,
+                                  struct btrfs_chunk *chunk, u64 logical)
 {
-       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
-       struct map_lookup *map;
-       struct extent_map *em;
-       u64 logical;
        u64 length;
        u64 stripe_len;
-       u64 devid;
-       u8 uuid[BTRFS_UUID_SIZE];
-       int num_stripes;
-       int ret;
-       int i;
+       u16 num_stripes;
+       u16 sub_stripes;
+       u64 type;
 
-       logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);
        stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
-       /* Validation check */
+       sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+       type = btrfs_chunk_type(leaf, chunk);
+
        if (!num_stripes) {
                btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
                          num_stripes);
@@ -6289,6 +6286,11 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                          "invalid chunk logical %llu", logical);
                return -EIO;
        }
+       if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
+               btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
+                         btrfs_chunk_sector_size(leaf, chunk));
+               return -EIO;
+       }
        if (!length || !IS_ALIGNED(length, root->sectorsize)) {
                btrfs_err(root->fs_info,
                        "invalid chunk length %llu", length);
@@ -6300,13 +6302,54 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                return -EIO;
        }
        if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
-           btrfs_chunk_type(leaf, chunk)) {
+           type) {
                btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
                          ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
                            BTRFS_BLOCK_GROUP_PROFILE_MASK) &
                          btrfs_chunk_type(leaf, chunk));
                return -EIO;
        }
+       if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+           (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
+           (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+           (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+           (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
+           ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
+            num_stripes != 1)) {
+               btrfs_err(root->fs_info,
+                       "invalid num_stripes:sub_stripes %u:%u for profile %llu",
+                       num_stripes, sub_stripes,
+                       type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+                         struct extent_buffer *leaf,
+                         struct btrfs_chunk *chunk)
+{
+       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+       struct map_lookup *map;
+       struct extent_map *em;
+       u64 logical;
+       u64 length;
+       u64 stripe_len;
+       u64 devid;
+       u8 uuid[BTRFS_UUID_SIZE];
+       int num_stripes;
+       int ret;
+       int i;
+
+       logical = key->offset;
+       length = btrfs_chunk_length(leaf, chunk);
+       stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+       num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+
+       ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+       if (ret)
+               return ret;
 
        read_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
@@ -6554,6 +6597,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        u32 array_size;
        u32 len = 0;
        u32 cur_offset;
+       u64 type;
        struct btrfs_key key;
 
        ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
@@ -6620,6 +6664,15 @@ int btrfs_read_sys_array(struct btrfs_root *root)
                                break;
                        }
 
+                       type = btrfs_chunk_type(sb, chunk);
+                       if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
+                               btrfs_err(root->fs_info,
+                           "invalid chunk type %llu in sys_array at offset %u",
+                                       type, cur_offset);
+                               ret = -EIO;
+                               break;
+                       }
+
                        len = btrfs_chunk_item_size(num_stripes);
                        if (cur_offset + len > array_size)
                                goto out_short_read;
@@ -6638,12 +6691,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
                sb_array_offset += len;
                cur_offset += len;
        }
+       clear_extent_buffer_uptodate(sb);
        free_extent_buffer_stale(sb);
        return ret;
 
 out_short_read:
        printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
                        len, cur_offset);
+       clear_extent_buffer_uptodate(sb);
        free_extent_buffer_stale(sb);
        return -EIO;
 }
@@ -6656,6 +6711,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
        struct btrfs_key found_key;
        int ret;
        int slot;
+       u64 total_dev = 0;
 
        root = root->fs_info->chunk_root;
 
@@ -6697,6 +6753,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
                        ret = read_one_dev(root, leaf, dev_item);
                        if (ret)
                                goto error;
+                       total_dev++;
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
@@ -6706,6 +6763,28 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
                }
                path->slots[0]++;
        }
+
+       /*
+        * After loading chunk tree, we've got all device information,
+        * do another round of validation checks.
+        */
+       if (total_dev != root->fs_info->fs_devices->total_devices) {
+               btrfs_err(root->fs_info,
+          "super_num_devices %llu mismatch with num_devices %llu found here",
+                         btrfs_super_num_devices(root->fs_info->super_copy),
+                         total_dev);
+               ret = -EINVAL;
+               goto error;
+       }
+       if (btrfs_super_total_bytes(root->fs_info->super_copy) <
+           root->fs_info->fs_devices->total_rw_bytes) {
+               btrfs_err(root->fs_info,
+       "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
+                         btrfs_super_total_bytes(root->fs_info->super_copy),
+                         root->fs_info->fs_devices->total_rw_bytes);
+               ret = -EINVAL;
+               goto error;
+       }
        ret = 0;
 error:
        unlock_chunks(root);
index 38a7ab87e10a80d9d62d80b74dd58c1f2989a966..281b768000e664e4d4ef9092d4bb567d003623a0 100644 (file)
@@ -794,6 +794,7 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
                        return 0;
                file->f_pos = pos;
                cprm->written += n;
+               cprm->pos += n;
                nr -= n;
        }
        return 1;
@@ -808,6 +809,7 @@ int dump_skip(struct coredump_params *cprm, size_t nr)
                if (dump_interrupted() ||
                    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
                        return 0;
+               cprm->pos += nr;
                return 1;
        } else {
                while (nr > PAGE_SIZE) {
@@ -822,7 +824,7 @@ EXPORT_SYMBOL(dump_skip);
 
 int dump_align(struct coredump_params *cprm, int align)
 {
-       unsigned mod = cprm->file->f_pos & (align - 1);
+       unsigned mod = cprm->pos & (align - 1);
        if (align & (align - 1))
                return 0;
        return mod ? dump_skip(cprm, align - mod) : 1;
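
The new cprm->pos field exists because f_pos is not meaningful for every dump
target (a pipe keeps f_pos at zero), so alignment must be computed from the
bytes actually emitted. A hypothetical stand-alone demo of the padding
arithmetic dump_align() performs:

#include <stdio.h>

/* Demo only: mirrors the mod/padding math above, outside the kernel. */
static unsigned pad_to(unsigned long long pos, unsigned align)
{
        unsigned mod = pos & (align - 1);       /* align must be a power of two */

        return mod ? align - mod : 0;
}

int main(void)
{
        printf("%u\n", pad_to(9, 4));   /* 3: pad offset 9 up to 12 */
        printf("%u\n", pad_to(12, 4));  /* 0: already aligned */
        return 0;
}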
index ad4a542e9babdceb321a77ef5c8837ad70177a6a..817c243c1ff114d5a0e5243a4269fdac925994e7 100644 (file)
@@ -1636,7 +1636,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
        struct dentry *dentry = __d_alloc(parent->d_sb, name);
        if (!dentry)
                return NULL;
-
+       dentry->d_flags |= DCACHE_RCUACCESS;
        spin_lock(&parent->d_lock);
        /*
         * don't need child lock because it is not subject
@@ -2358,7 +2358,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
        BUG_ON(!d_unhashed(entry));
        hlist_bl_lock(b);
-       entry->d_flags |= DCACHE_RCUACCESS;
        hlist_bl_add_head_rcu(&entry->d_hash, b);
        hlist_bl_unlock(b);
 }
@@ -2843,6 +2842,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
        /* ... and switch them in the tree */
        if (IS_ROOT(dentry)) {
                /* splicing a tree */
+               dentry->d_flags |= DCACHE_RCUACCESS;
                dentry->d_parent = target->d_parent;
                target->d_parent = target;
                list_del_init(&target->d_child);
index 866bb18efefea9953250ba1cbdc145e7d4be49af..e818f5ac7a2692b6bb5c1a132d7bbfd967ad956a 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/mount.h>
+#include <linux/file.h>
 #include "ecryptfs_kernel.h"
 
 struct ecryptfs_open_req {
@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
        flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
        (*lower_file) = dentry_open(&req.path, flags, cred);
        if (!IS_ERR(*lower_file))
-               goto out;
+               goto have_file;
        if ((flags & O_ACCMODE) == O_RDONLY) {
                rc = PTR_ERR((*lower_file));
                goto out;
@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
        mutex_unlock(&ecryptfs_kthread_ctl.mux);
        wake_up(&ecryptfs_kthread_ctl.wait);
        wait_for_completion(&req.done);
-       if (IS_ERR(*lower_file))
+       if (IS_ERR(*lower_file)) {
                rc = PTR_ERR(*lower_file);
+               goto out;
+       }
+have_file:
+       if ((*lower_file)->f_op->mmap == NULL) {
+               fput(*lower_file);
+               *lower_file = NULL;
+               rc = -EMEDIUMTYPE;
+       }
 out:
        return rc;
 }
index 6a82fb7e21273ca457f583da77af8b2cae9f0a81..70580ab1445c89f8379756fc16e33223aca4ab0d 100644 (file)
@@ -3030,9 +3030,13 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
                        }
                        if (*opened & FILE_CREATED)
                                fsnotify_create(dir, dentry);
-                       path->dentry = dentry;
-                       path->mnt = nd->path.mnt;
-                       return 1;
+                       if (unlikely(d_is_negative(dentry))) {
+                               error = -ENOENT;
+                       } else {
+                               path->dentry = dentry;
+                               path->mnt = nd->path.mnt;
+                               return 1;
+                       }
                }
        }
        dput(dentry);
@@ -3201,9 +3205,7 @@ static int do_last(struct nameidata *nd,
        int acc_mode = op->acc_mode;
        unsigned seq;
        struct inode *inode;
-       struct path save_parent = { .dentry = NULL, .mnt = NULL };
        struct path path;
-       bool retried = false;
        int error;
 
        nd->flags &= ~LOOKUP_PARENT;
@@ -3246,7 +3248,6 @@ static int do_last(struct nameidata *nd,
                        return -EISDIR;
        }
 
-retry_lookup:
        if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
                error = mnt_want_write(nd->path.mnt);
                if (!error)
@@ -3298,6 +3299,10 @@ retry_lookup:
                got_write = false;
        }
 
+       error = follow_managed(&path, nd);
+       if (unlikely(error < 0))
+               return error;
+
        if (unlikely(d_is_negative(path.dentry))) {
                path_to_nameidata(&path, nd);
                return -ENOENT;
@@ -3313,10 +3318,6 @@ retry_lookup:
                return -EEXIST;
        }
 
-       error = follow_managed(&path, nd);
-       if (unlikely(error < 0))
-               return error;
-
        seq = 0;        /* out of RCU mode, so the value doesn't matter */
        inode = d_backing_inode(path.dentry);
 finish_lookup:
@@ -3327,23 +3328,14 @@ finish_lookup:
        if (unlikely(error))
                return error;
 
-       if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
-               path_to_nameidata(&path, nd);
-       } else {
-               save_parent.dentry = nd->path.dentry;
-               save_parent.mnt = mntget(path.mnt);
-               nd->path.dentry = path.dentry;
-
-       }
+       path_to_nameidata(&path, nd);
        nd->inode = inode;
        nd->seq = seq;
        /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
 finish_open:
        error = complete_walk(nd);
-       if (error) {
-               path_put(&save_parent);
+       if (error)
                return error;
-       }
        audit_inode(nd->name, nd->path.dentry, 0);
        error = -EISDIR;
        if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
@@ -3366,13 +3358,9 @@ finish_open_created:
                goto out;
        BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
        error = vfs_open(&nd->path, file, current_cred());
-       if (!error) {
-               *opened |= FILE_OPENED;
-       } else {
-               if (error == -EOPENSTALE)
-                       goto stale_open;
+       if (error)
                goto out;
-       }
+       *opened |= FILE_OPENED;
 opened:
        error = open_check_o_direct(file);
        if (!error)
@@ -3388,26 +3376,7 @@ out:
        }
        if (got_write)
                mnt_drop_write(nd->path.mnt);
-       path_put(&save_parent);
        return error;
-
-stale_open:
-       /* If no saved parent or already retried then can't retry */
-       if (!save_parent.dentry || retried)
-               goto out;
-
-       BUG_ON(save_parent.dentry != dir);
-       path_put(&nd->path);
-       nd->path = save_parent;
-       nd->inode = dir->d_inode;
-       save_parent.mnt = NULL;
-       save_parent.dentry = NULL;
-       if (got_write) {
-               mnt_drop_write(nd->path.mnt);
-               got_write = false;
-       }
-       retried = true;
-       goto retry_lookup;
 }
 
 static int do_tmpfile(struct nameidata *nd, unsigned flags,
index 4fb1691b435552c044134b56aa2032da35684b5b..a7ec92c051f5409da4ef23aab35030d230b975da 100644 (file)
@@ -2409,8 +2409,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
                        mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
                }
                if (type->fs_flags & FS_USERNS_VISIBLE) {
-                       if (!fs_fully_visible(type, &mnt_flags))
+                       if (!fs_fully_visible(type, &mnt_flags)) {
+                               put_filesystem(type);
                                return -EPERM;
+                       }
                }
        }
 
@@ -3271,7 +3273,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
                list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
                        struct inode *inode = child->mnt_mountpoint->d_inode;
                        /* Only worry about locked mounts */
-                       if (!(mnt_flags & MNT_LOCKED))
+                       if (!(child->mnt.mnt_flags & MNT_LOCKED))
                                continue;
                        /* Is the directory permanently empty? */

                        if (!is_empty_dir_inode(inode))
index 55bc7d6c8aacaed2f4892bd1a554e3baf939bf34..06702783bf40254593f82b76dd9745ad557ac334 100644 (file)
@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
        if (IS_ERR(sb))
                return ERR_CAST(sb);
 
+       /*
+        * procfs isn't actually a stacking filesystem; however, there is
+        * too much magic going on inside it to permit stacking things on
+        * top of it
+        */
+       sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+
        if (!proc_parse_options(options, ns)) {
                deactivate_locked_super(sb);
                return ERR_PTR(-EINVAL);
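
Pinning s_stack_depth at the maximum makes any stacking filesystem refuse
procfs as a lower layer: stacked mounts add one to the lower layer's depth and
bail out past the limit. A runnable sketch of that check, using the real
FILESYSTEM_MAX_STACK_DEPTH value of 2 from include/linux/fs.h (the helper name
is invented for illustration):

#include <stdio.h>

#define FILESYSTEM_MAX_STACK_DEPTH 2    /* value from include/linux/fs.h */

/* With procfs pinned at the max depth, stacking on it always fails. */
static int may_stack_on(int lower_depth)
{
        return lower_depth + 1 <= FILESYSTEM_MAX_STACK_DEPTH;
}

int main(void)
{
        printf("%d\n", may_stack_on(0));                           /* 1: plain fs is fine */
        printf("%d\n", may_stack_on(FILESYSTEM_MAX_STACK_DEPTH));  /* 0: procfs refused */
        return 0;
}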
index 6bd05700d8c94cfe5c190008c1eb8910bf8ae0ae..05f05f17a7c2e4fc14f8fa858abc9f7d7715c7b9 100644 (file)
 
 #include <asm-generic/qspinlock_types.h>
 
+/**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
 /**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
        /*
-        * queued_spin_lock_slowpath() can ACQUIRE the lock before
-        * issuing the unordered store that sets _Q_LOCKED_VAL.
-        *
-        * See both smp_cond_acquire() sites for more detail.
-        *
-        * This however means that in code like:
-        *
-        *   spin_lock(A)               spin_lock(B)
-        *   spin_unlock_wait(B)        spin_is_locked(A)
-        *   do_something()             do_something()
-        *
-        * Both CPUs can end up running do_something() because the store
-        * setting _Q_LOCKED_VAL will pass through the loads in
-        * spin_unlock_wait() and/or spin_is_locked().
+        * See queued_spin_unlock_wait().
         *
-        * Avoid this by issuing a full memory barrier between the spin_lock()
-        * and the loads in spin_unlock_wait() and spin_is_locked().
-        *
-        * Note that regular mutual exclusion doesn't care about this
-        * delayed store.
+        * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+        * isn't immediately observable.
         */
-       smp_mb();
-       return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+       return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-       /* See queued_spin_is_locked() */
-       smp_mb();
-       while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-               cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
index 576e4639ca609e8bfb8686474406073937e912e9..314b3caa701cc20a6c3fcd628de3c48b7c0c7d21 100644 (file)
@@ -65,6 +65,7 @@ struct coredump_params {
        unsigned long limit;
        unsigned long mm_flags;
        loff_t written;
+       loff_t pos;
 };
 
 /*
index 0c72204c75fc9428f08c9e63667bf75dee0bc153..fb39d5add173f94a3f9be0e77455e0f5b54764d8 100644 (file)
@@ -25,7 +25,7 @@
 #define CLK_SET_PARENT_GATE    BIT(1) /* must be gated across re-parent */
 #define CLK_SET_RATE_PARENT    BIT(2) /* propagate rate change up one level */
 #define CLK_IGNORE_UNUSED      BIT(3) /* do not gate even if unused */
-#define CLK_IS_ROOT            BIT(4) /* Deprecated: Don't use */
+                               /* unused */
 #define CLK_IS_BASIC           BIT(5) /* Basic clk, can't do a to_clk_foo() */
 #define CLK_GET_RATE_NOCACHE   BIT(6) /* do not use the cached clk rate */
 #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
index 786ad32631a672695d88b47d67c1019df624acab..07b83d32f66c857aeabe1eae476db0c1a9e5fbda 100644 (file)
@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
+static inline struct cpuidle_device *cpuidle_get_device(void)
+{return __this_cpu_read(cpuidle_devices); }
 #else
 static inline void disable_cpuidle(void) { }
 static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
        struct cpuidle_device *dev) {return NULL; }
+static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
 #endif
 
 #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
index c2db3ca22217c85891f06b53496bb6f644d2399e..f196dd0b0f2f6c241715807f63bdccaa30973ce8 100644 (file)
@@ -1005,7 +1005,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
 /* Iterate through an efi_memory_map */
 #define for_each_efi_memory_desc_in_map(m, md)                            \
        for ((md) = (m)->map;                                              \
-            (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
+            ((void *)(md) + (m)->desc_size) <= (m)->map_end;              \
             (md) = (void *)(md) + (m)->desc_size)
 
 /**
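
The for_each_efi_memory_desc_in_map() change rewrites the loop bound as "does the next descriptor still fit?", using byte arithmetic with the firmware-reported stride rather than efi_memory_desc_t pointer arithmetic. A compile-able sketch of the same variable-stride walk (the struct and sizes are illustrative, not the EFI definitions):

    #include <stdio.h>
    #include <string.h>

    struct desc { unsigned type; };   /* stand-in; real EFI descriptors are larger */

    int main(void)
    {
            char map[64];
            size_t desc_size = 16;     /* firmware-chosen stride, may exceed sizeof(struct desc) */
            char *map_end = map + sizeof(map);

            memset(map, 0, sizeof(map));

            /* same shape as the macro after the fix: advance by desc_size, not sizeof */
            for (char *md = map; md + desc_size <= map_end; md += desc_size) {
                    struct desc *d = (struct desc *)md;
                    printf("descriptor at offset %td, type %u\n", md - map, d->type);
            }
            return 0;
    }
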
index 035abdf62cfe953e5b15822aa847868032989574..73a48479892dd60ffc60a52e508764a4f897abde 100644 (file)
@@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out {
        u8                      rsvd[8];
 };
 
-#define MLX5_CMD_OP_MAX 0x920
-
 enum {
        VPORT_STATE_DOWN                = 0x0,
        VPORT_STATE_UP                  = 0x1,
@@ -1369,6 +1367,12 @@ enum mlx5_cap_type {
 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
        MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
 
+#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
+       MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
+       MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+
 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
        MLX5_GET(flow_table_eswitch_cap, \
                 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
index 9a05cd7e5890b43400c65f6c903d5d815daffa28..e955a285900991c5623fca30010a960f68b7c7e8 100644 (file)
@@ -205,7 +205,8 @@ enum {
        MLX5_CMD_OP_ALLOC_FLOW_COUNTER            = 0x939,
        MLX5_CMD_OP_DEALLOC_FLOW_COUNTER          = 0x93a,
        MLX5_CMD_OP_QUERY_FLOW_COUNTER            = 0x93b,
-       MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c
+       MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
+       MLX5_CMD_OP_MAX
 };
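
Making MLX5_CMD_OP_MAX the last, value-less member of the enum lets it track newly appended opcodes automatically, unlike the removed hard-coded #define, which had already gone stale. The pattern in miniature (values are illustrative):

    #include <stdio.h>

    enum cmd_op {
            CMD_OP_QUERY  = 0x939,
            CMD_OP_MODIFY = 0x93c,
            CMD_OP_MAX                /* always one past the last real opcode */
    };

    int main(void)
    {
            printf("CMD_OP_MAX = 0x%x\n", CMD_OP_MAX);  /* 0x93d, follows additions automatically */
            return 0;
    }
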
 
 struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits {
        u8         vport_svlan_insert[0x1];
        u8         vport_cvlan_insert_if_not_exist[0x1];
        u8         vport_cvlan_insert_overwrite[0x1];
-       u8         reserved_at_5[0x1b];
+       u8         reserved_at_5[0x19];
+       u8         nic_vport_node_guid_modify[0x1];
+       u8         nic_vport_port_guid_modify[0x1];
 
        u8         reserved_at_20[0x7e0];
 };
@@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
 };
 
 struct mlx5_ifc_modify_nic_vport_field_select_bits {
-       u8         reserved_at_0[0x19];
+       u8         reserved_at_0[0x16];
+       u8         node_guid[0x1];
+       u8         port_guid[0x1];
+       u8         reserved_at_18[0x1];
        u8         mtu[0x1];
        u8         change_event[0x1];
        u8         promisc[0x1];
index 64221027bf1f0d8bdaa2a1506e18da338e0f0c1e..266320feb16037f414903daf712ed097c70ea9b8 100644 (file)
@@ -460,10 +460,9 @@ struct mlx5_core_qp {
 };
 
 struct mlx5_qp_path {
-       u8                      fl;
+       u8                      fl_free_ar;
        u8                      rsvd3;
-       u8                      free_ar;
-       u8                      pkey_index;
+       __be16                  pkey_index;
        u8                      rsvd0;
        u8                      grh_mlid;
        __be16                  rlid;
@@ -560,6 +559,7 @@ struct mlx5_modify_qp_mbox_in {
        __be32                  optparam;
        u8                      rsvd0[4];
        struct mlx5_qp_context  ctx;
+       u8                      rsvd2[16];
 };
 
 struct mlx5_modify_qp_mbox_out {
index 301da4a5e6bfa340363799310a21251c685935f0..6c16c198f680c26df40d1322ca1381e6bbb90510 100644 (file)
@@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+                                   u32 vport, u64 node_guid);
 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
                                        u16 *qkey_viol_cntr);
 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
index 7973a821ac58877a56ff5214793786948170ae8e..ead97654c4e9afa7239e068cb37f6981d77ce6a3 100644 (file)
@@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 
 static inline int raw_read_seqcount_latch(seqcount_t *s)
 {
-       return lockless_dereference(s)->sequence;
+       int seq = READ_ONCE(s->sequence);
+       /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
+       smp_read_barrier_depends();
+       return seq;
 }
 
 /**
@@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
  *     unsigned seq, idx;
  *
  *     do {
- *             seq = lockless_dereference(latch)->seq;
+ *             seq = raw_read_seqcount_latch(&latch->seq);
  *
  *             idx = seq & 0x01;
  *             entry = data_query(latch->data[idx], ...);
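
raw_read_seqcount_latch() now does a plain READ_ONCE() plus a data-dependency barrier, and the kernel-doc example above switches to it. A rough single-threaded C11 sketch of the latch shape (memory-ordering details are elided; a real concurrent reader/writer pair needs the fences the kernel comment describes):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint seq;
    static int data[2];   /* two copies; the writer only touches the one readers are not using */

    static void write_latch(int val)
    {
            atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* readers -> data[1] */
            data[0] = val;
            atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* readers -> data[0] */
            data[1] = val;
    }

    static int read_latch(void)
    {
            unsigned s;
            int v;
            do {
                    s = atomic_load_explicit(&seq, memory_order_acquire); /* the latch read */
                    v = data[s & 1];
            } while (atomic_load_explicit(&seq, memory_order_acquire) != s); /* retry on race */
            return v;
    }

    int main(void)
    {
            write_latch(42);
            printf("%d\n", read_latch());
            return 0;
    }
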
index 48103cf94e976e9c13809cfec895e9bb1c9fa96c..13de0ccaa0594172a946572bbc04397ab0a89032 100644 (file)
@@ -42,6 +42,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 
 int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
                      struct sockaddr __user **, struct iovec **);
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
 asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
                                   unsigned int);
 asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
index af4c10ebb2414494e75c279b3d9c91da48442982..cd6018a9ee2467cec1fbe4d621fc294bc843a05e 100644 (file)
@@ -1232,7 +1232,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 const char *ip_vs_state_name(__u16 proto, int state);
 
 void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
-int ip_vs_check_template(struct ip_vs_conn *ct);
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
 void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
 int ip_vs_conn_init(void);
 void ip_vs_conn_cleanup(void);
index 9c5638ad872e39d2d01cca6f86c1620e499c916c..0dbce55437f2c57cc1642d865a91ffc4169c9866 100644 (file)
@@ -28,8 +28,8 @@ struct nf_queue_handler {
                                                struct nf_hook_ops *ops);
 };
 
-void nf_register_queue_handler(const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(void);
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(struct net *net);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
index 38aa4983e2a90bd5a391e844e0556591e5d85f60..36d723579af21f78e2da780c4840762ce88a4770 100644 (file)
@@ -5,11 +5,13 @@
 
 struct proc_dir_entry;
 struct nf_logger;
+struct nf_queue_handler;
 
 struct netns_nf {
 #if defined CONFIG_PROC_FS
        struct proc_dir_entry *proc_netfilter;
 #endif
+       const struct nf_queue_handler __rcu *queue_handler;
        const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *nf_log_dir_header;
index 0f7efa88f210360928b36a80022c9bd53d778b40..3722dda0199ddaa0e5e8554c57c843968075a7b9 100644 (file)
@@ -392,16 +392,20 @@ struct tc_cls_u32_offload {
        };
 };
 
-static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev,
+                                    const struct tcf_proto *tp, u32 flags)
 {
+       const struct Qdisc *sch = tp->q;
+       const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
+
        if (!(dev->features & NETIF_F_HW_TC))
                return false;
-
        if (flags & TCA_CLS_FLAGS_SKIP_HW)
                return false;
-
        if (!dev->netdev_ops->ndo_setup_tc)
                return false;
+       if (cops && cops->tcf_cl_offload)
+               return cops->tcf_cl_offload(tp->classid);
 
        return true;
 }
index a1fd76c22a5903cadf2e97c844017925cfbc9f93..62d553184e91c5c6f27da411f9c448fef91ded0f 100644 (file)
@@ -168,6 +168,7 @@ struct Qdisc_class_ops {
 
        /* Filter manipulation */
        struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+       bool                    (*tcf_cl_offload)(u32 classid);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                        u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -691,9 +692,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
-               if (sch->gso_skb)
+               if (sch->gso_skb) {
                        /* it's still part of the queue */
+                       qdisc_qstats_backlog_inc(sch, sch->gso_skb);
                        sch->q.qlen++;
+               }
        }
 
        return sch->gso_skb;
@@ -706,6 +709,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 
        if (skb) {
                sch->gso_skb = NULL;
+               qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
index 432bed510369e7b19a4773ec5a8bb09b8d9bb1e6..7e440d41487aa8671fbb8952fb01e2bbf63b11f0 100644 (file)
@@ -217,10 +217,10 @@ enum ib_device_cap_flags {
        IB_DEVICE_CROSS_CHANNEL         = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
-       IB_DEVICE_ON_DEMAND_PAGING              = (1 << 31),
+       IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
-       IB_DEVICE_VIRTUAL_FUNCTION              = ((u64)1 << 33),
-       IB_DEVICE_RAW_SCATTER_FCS               = ((u64)1 << 34),
+       IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
+       IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
 };
 
 enum ib_signature_prot_cap {
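
Three of the four touched initializers were already 64-bit safe; (1 << 31) is the hazard, since shifting into the sign bit of a 32-bit int is undefined in C and, where it happens to "work", the value sign-extends when widened to a u64 flags word. A small demonstration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t bad  = (uint64_t)(1 << 31);   /* int shift: UB, typically sign-extends */
            uint64_t good = 1ULL << 31;

            printf("bad  = 0x%016llx\n", (unsigned long long)bad);  /* often 0xffffffff80000000 */
            printf("good = 0x%016llx\n", (unsigned long long)good); /* 0x0000000080000000 */
            return 0;
    }
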
index 23c6960e94a4fa268dbd8286be6a993ebbd6ce12..2bdd1e3e70076dd3476e3398452c4837b2b80f9b 100644 (file)
@@ -118,7 +118,7 @@ struct btrfs_ioctl_vol_args_v2 {
        };
        union {
                char name[BTRFS_SUBVOL_NAME_MAX + 1];
-               u64 devid;
+               __u64 devid;
        };
 };
 
index ca1054dd82497ddcbce9624e9ad20b2791248be4..72a04a0e8ccef1e5560378af9177a6c47954daaf 100644 (file)
@@ -1,5 +1,5 @@
 #ifndef _UAPI_LINUX_GTP_H_
-#define _UAPI_LINUX_GTP_H__
+#define _UAPI_LINUX_GTP_H_
 
 enum gtp_genl_cmds {
        GTP_CMD_NEWPDP,
index a7f27704f9807e7b14eadb9b08cf09c2c183a6a7..691984cb0b915da8afbc60211145d9dbb45c7143 100644 (file)
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += asequencer.h
+header-y += asoc.h
 header-y += asound.h
 header-y += asound_fm.h
 header-y += compress_offload.h
@@ -10,3 +11,5 @@ header-y += hdsp.h
 header-y += hdspm.h
 header-y += sb16_csp.h
 header-y += sfnt_info.h
+header-y += tlv.h
+header-y += usb_stream.h
index 274450efea90eebeb5f3fa4ddb777c2ea2e6ef73..9c51ec3f0f440f35dda1bf9e9b3e02e421388df3 100644 (file)
@@ -3862,10 +3862,8 @@ static void _free_event(struct perf_event *event)
        if (event->ctx)
                put_ctx(event->ctx);
 
-       if (event->pmu) {
-               exclusive_event_destroy(event);
-               module_put(event->pmu->module);
-       }
+       exclusive_event_destroy(event);
+       module_put(event->pmu->module);
 
        call_rcu(&event->rcu_head, free_event_rcu);
 }
index ee25f5ba4aca13f8a05a9e6c3a4c5a2aff768131..33664f70e2d25e880efdbc8695fcc12989871150 100644 (file)
@@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
-       struct page *page;
+       struct page *page, *tail;
        struct address_space *mapping;
        int err, ro = 0;
 
@@ -530,7 +530,15 @@ again:
         * considered here and page lock forces unnecessary serialization.
         * From this point on, mapping will be re-verified if necessary and
         * page lock will be acquired only if it is unavoidable
-        */
+        *
+        * Mapping checks require the head page for any compound page so the
+        * head page and mapping are looked up now. For anonymous pages, it
+        * does not matter if the page splits in the future as the key is
+        * based on the address. For filesystem-backed pages, the tail is
+        * required as the index of the page determines the key. For
+        * base pages, there is no tail page and tail == page.
+        */
+       tail = page;
        page = compound_head(page);
        mapping = READ_ONCE(page->mapping);
 
@@ -654,7 +662,7 @@ again:
 
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.inode = inode;
-               key->shared.pgoff = basepage_index(page);
+               key->shared.pgoff = basepage_index(tail);
                rcu_read_unlock();
        }
 
index e364b424b019ff51de0bde17fe2611444b5c1efb..79d2d765a75f1fef4945dacd2b0aa07b77eba336 100644 (file)
@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
        if (!hold_ctx)
                return 0;
 
-       if (unlikely(ctx == hold_ctx))
-               return -EALREADY;
-
        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        unsigned long flags;
        int ret;
 
+       if (use_ww_ctx) {
+               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+               if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+                       return -EALREADY;
+       }
+
        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
index ce2f75e32ae155b30cfe324c56f9bc526771b66a..5fc8c311b8fe59d46decc2c5a049ce6e860a07b8 100644 (file)
@@ -267,6 +267,66 @@ static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath      native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():    *,1,0 -> *,0,1
+ *
+ *  (2) set_locked():                  t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():      t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient; however, that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+       u32 val;
+
+       for (;;) {
+               val = atomic_read(&lock->val);
+
+               if (!val) /* not locked, we're done */
+                       goto done;
+
+               if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+                       break;
+
+               /* not locked, but pending, wait until we observe the lock */
+               cpu_relax();
+       }
+
+       /* any unlock is good */
+       while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+               cpu_relax();
+
+done:
+       smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
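
The new queued_spin_unlock_wait() is two-phase: first spin until a locked (or definitely unlocked) state is observed, then spin until that holder releases. The same shape as a compile-able C11 sketch; the mask value is borrowed from the qspinlock layout, and relaxed loads plus a trailing acquire fence stand in for the kernel's cpu_relax()/smp_rmb():

    #include <stdatomic.h>
    #include <stdio.h>

    #define Q_LOCKED_MASK 0xffu   /* low byte = locked, as in qspinlock */

    static atomic_uint lock_val;

    static void unlock_wait(atomic_uint *lock)
    {
            unsigned v;

            for (;;) {
                    v = atomic_load_explicit(lock, memory_order_relaxed);
                    if (!v)                    /* not locked, we're done */
                            goto done;
                    if (v & Q_LOCKED_MASK)     /* locked: go wait for the unlock */
                            break;
                    /* pending but not yet locked: wait until the lock shows up */
            }
            while (atomic_load_explicit(lock, memory_order_relaxed) & Q_LOCKED_MASK)
                    ;                          /* cpu_relax() in the kernel */
    done:
            atomic_thread_fence(memory_order_acquire);  /* stands in for smp_rmb() */
    }

    int main(void)
    {
            atomic_store(&lock_val, 0);   /* unlocked */
            unlock_wait(&lock_val);
            puts("lock observed free");
            return 0;
    }
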
index 074994bcfa9be14336ecb06912a969f6d9546c9d..04d7cf3ef8cf932e036c7c76f6a27cefa962efc0 100644 (file)
@@ -614,6 +614,7 @@ free_bufs:
 
        kref_put(&chan->kref, relay_destroy_channel);
        mutex_unlock(&relay_channels_mutex);
+       kfree(chan);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(relay_open);
index 7f2cae4620c7a949fdc294c06211884e2ca83afb..017d5394f5dc9206c21259d61f77936396796448 100644 (file)
@@ -2253,9 +2253,11 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 #endif
 #endif
 
+#ifdef CONFIG_SCHEDSTATS
+
 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+static bool __initdata __sched_schedstats = false;
 
-#ifdef CONFIG_SCHEDSTATS
 static void set_schedstats(bool enabled)
 {
        if (enabled)
@@ -2278,11 +2280,16 @@ static int __init setup_schedstats(char *str)
        if (!str)
                goto out;
 
+       /*
+        * This code is called before jump labels have been set up, so we can't
+        * change the static branch directly just yet.  Instead set a temporary
+        * variable so init_schedstats() can do it later.
+        */
        if (!strcmp(str, "enable")) {
-               set_schedstats(true);
+               __sched_schedstats = true;
                ret = 1;
        } else if (!strcmp(str, "disable")) {
-               set_schedstats(false);
+               __sched_schedstats = false;
                ret = 1;
        }
 out:
@@ -2293,6 +2300,11 @@ out:
 }
 __setup("schedstats=", setup_schedstats);
 
+static void __init init_schedstats(void)
+{
+       set_schedstats(__sched_schedstats);
+}
+
 #ifdef CONFIG_PROC_SYSCTL
 int sysctl_schedstats(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -2313,8 +2325,10 @@ int sysctl_schedstats(struct ctl_table *table, int write,
                set_schedstats(state);
        return err;
 }
-#endif
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
+#else  /* !CONFIG_SCHEDSTATS */
+static inline void init_schedstats(void) {}
+#endif /* CONFIG_SCHEDSTATS */
 
 /*
  * fork()/clone()-time setup:
@@ -3156,7 +3170,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 static inline void schedule_debug(struct task_struct *prev)
 {
 #ifdef CONFIG_SCHED_STACK_END_CHECK
-       BUG_ON(task_stack_end_corrupted(prev));
+       if (task_stack_end_corrupted(prev))
+               panic("corrupted stack end detected inside scheduler\n");
 #endif
 
        if (unlikely(in_atomic_preempt_off())) {
@@ -7487,6 +7502,8 @@ void __init sched_init(void)
 #endif
        init_sched_fair_class();
 
+       init_schedstats();
+
        scheduler_running = 1;
 }
 
index cf905f655ba120fd91d2dd023c6a5a93ad699d4b..0368c393a3362d981e79745716cbb59b2989dac8 100644 (file)
@@ -427,19 +427,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
-#ifdef CONFIG_SCHEDSTATS
-       if (schedstat_enabled()) {
-               SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-                       SPLIT_NS(p->se.statistics.wait_sum),
-                       SPLIT_NS(p->se.sum_exec_runtime),
-                       SPLIT_NS(p->se.statistics.sum_sleep_runtime));
-       }
-#else
+
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-               0LL, 0L,
+               SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
-               0LL, 0L);
-#endif
+               SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
+
 #ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
index bd12c6c714ecea0718565df4539304e3bf9d9e57..c5aeedf4e93ad8f8f5c2edaf7248ac0ce9c47e8e 100644 (file)
@@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
  */
 static void cpuidle_idle_call(void)
 {
-       struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+       struct cpuidle_device *dev = cpuidle_get_device();
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;
 
index 70b3b6a20fb0e362f4c816fe0f069c7c8576c7f4..78955cbea31c4a5378bc0b73f5f3bbcbdfa23d75 100644 (file)
@@ -33,6 +33,8 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 # define schedstat_inc(rq, field)      do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
 # define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
 # define schedstat_set(var, val)       do { if (schedstat_enabled()) { var = (val); } } while (0)
+# define schedstat_val(rq, field)      ((schedstat_enabled()) ? (rq)->field : 0)
+
 #else /* !CONFIG_SCHEDSTATS */
 static inline void
 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
@@ -47,6 +49,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 # define schedstat_inc(rq, field)      do { } while (0)
 # define schedstat_add(rq, field, amt) do { } while (0)
 # define schedstat_set(var, val)       do { } while (0)
+# define schedstat_val(rq, field)      0
 #endif
 
 #ifdef CONFIG_SCHED_INFO
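
schedstat_val() reads a field when stats are compiled in (and, in the kernel, runtime-enabled) and evaluates to 0 otherwise, which is what lets the sched/debug.c hunk above drop its #ifdef/#else printf pair. The pattern reduced to a toy, with the runtime check omitted:

    #include <stdio.h>

    #define STATS_ENABLED 1   /* flip to 0 to mimic !CONFIG_SCHEDSTATS */

    #if STATS_ENABLED
    # define statval(s, field) ((s)->field)
    #else
    # define statval(s, field) 0
    #endif

    struct stats { long wait_sum; };

    int main(void)
    {
            struct stats s = { .wait_sum = 123 };
            /* one printf works for both configurations */
            printf("wait_sum = %ld\n", (long)statval(&s, wait_sum));
            return 0;
    }
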
index 780bcbe1d4de33bbb24d2328b231bb056bc641b5..720b7bb01d43ae5f49761da3a34d8fce4e55dbc2 100644 (file)
@@ -198,7 +198,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       file = (struct file *)array->ptrs[index];
+       file = READ_ONCE(array->ptrs[index]);
        if (unlikely(!file))
                return -ENOENT;
 
@@ -247,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       file = (struct file *)array->ptrs[index];
+       file = READ_ONCE(array->ptrs[index]);
        if (unlikely(!file))
                return -ENOENT;
 
index b8024fa7101d9a8ff625606de973b9b670d0ecf4..6c707bfe02fde002feae9ea2fab3fc9647c3baa0 100644 (file)
@@ -126,6 +126,17 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
                 */
                start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
                end_index = (endbyte >> PAGE_SHIFT);
+               if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+                       /* First page is tricky as 0 - 1 = -1, but pgoff_t
+                        * is unsigned, so the end_index >= start_index
+                        * check below would be true and we'll discard the whole
+                        * file cache which is not what was asked.
+                        */
+                       if (end_index == 0)
+                               break;
+
+                       end_index--;
+               }
 
                if (end_index >= start_index) {
                        unsigned long count = invalidate_mapping_pages(mapping,
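
The fadvise fix guards the end_index decrement because pgoff_t is unsigned: for a range inside the first page, end_index is 0 and 0 - 1 wraps to ULONG_MAX, so end_index >= start_index passes and the whole file's cache would be dropped. The wraparound in isolation:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            unsigned long start_index = 0, end_index = 0;

            end_index--;   /* 0 - 1 wraps: pgoff_t is unsigned */
            printf("end_index = %lu (ULONG_MAX = %lu)\n", end_index, ULONG_MAX);
            if (end_index >= start_index)
                    puts("range check passes -> would drop the whole file cache");
            return 0;
    }
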
index d26162e81feaa78b2fb4615839cbb580fc626f4b..388c2bb9b55cff8e8b89cac1947bf87ef1c83cd7 100644 (file)
@@ -832,8 +832,27 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
         * Only the process that called mmap() has reserves for
         * private mappings.
         */
-       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-               return true;
+       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+               /*
+                * Like the shared case above, a hole punch or truncate
+                * could have been performed on the private mapping.
+                * Examine the value of chg to determine if reserves
+                * actually exist or were previously consumed.
+                * Very Subtle - The value of chg comes from a previous
+                * call to vma_needs_reserves().  The reserve map for
+                * private mappings has different (opposite) semantics
+                * than that of shared mappings.  vma_needs_reserves()
+                * has already taken this difference in semantics into
+                * account.  Therefore, the meaning of chg is the same
+                * as in the shared case above.  Code could easily be
+                * combined, but keeping it separate draws attention to
+                * subtle differences.
+                */
+               if (chg)
+                       return false;
+               else
+                       return true;
+       }
 
        return false;
 }
@@ -1816,6 +1835,25 @@ static long __vma_reservation_common(struct hstate *h,
 
        if (vma->vm_flags & VM_MAYSHARE)
                return ret;
+       else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+               /*
+                * In most cases, reserves always exist for private mappings.
+                * However, a file associated with mapping could have been
+                * hole punched or truncated after reserves were consumed.
+                * A subsequent fault on such a range will not use reserves.
+                * Subtle - The reserve map for private mappings has the
+                * opposite meaning than that of shared mappings.  If NO
+                * entry is in the reserve map, it means a reservation exists.
+                * If an entry exists in the reserve map, it means the
+                * reservation has already been consumed.  As a result, the
+                * return value of this routine is the opposite of the
+                * value returned from reserve map manipulation routines above.
+                */
+               if (ret)
+                       return 0;
+               else
+                       return 1;
+       }
        else
                return ret < 0 ? ret : 0;
 }
index 18b6a2b8d183550daa6c3e64ae7ba50610ead79c..28439acda6ec60cdeb438448587db447971ade2d 100644 (file)
@@ -763,8 +763,8 @@ static int kasan_mem_notifier(struct notifier_block *nb,
 
 static int __init kasan_memhotplug_init(void)
 {
-       pr_err("WARNING: KASAN doesn't support memory hot-add\n");
-       pr_err("Memory hot-add will be disabled\n");
+       pr_info("WARNING: KASAN doesn't support memory hot-add\n");
+       pr_info("Memory hot-add will be disabled\n");
 
        hotplug_memory_notifier(kasan_mem_notifier, 0);
 
index 58c69c94402a74498a488392eabeed15ddddd1ee..75e74408cc8f5081228288ed7bfd5d3e3ce9b4cd 100644 (file)
@@ -1608,7 +1608,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-       if (!current->memcg_may_oom || current->memcg_in_oom)
+       if (!current->memcg_may_oom)
                return;
        /*
         * We are in the middle of the charge context here, so we
index 95916142fc46d3a80a6ddfa54218e1de641f6045..59f5fafa6e1fcb165e75153244d1b77d25fdb37d 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -667,6 +667,24 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
+/*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+ * workqueue, aiding in getting memory freed.
+ */
+static struct workqueue_struct *lru_add_drain_wq;
+
+static int __init lru_init(void)
+{
+       lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
+
+       if (WARN(!lru_add_drain_wq,
+               "Failed to create workqueue lru_add_drain_wq"))
+               return -ENOMEM;
+
+       return 0;
+}
+early_initcall(lru_init);
+
 void lru_add_drain_all(void)
 {
        static DEFINE_MUTEX(lock);
@@ -686,7 +704,7 @@ void lru_add_drain_all(void)
                    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
-                       schedule_work_on(cpu, work);
+                       queue_work_on(cpu, lru_add_drain_wq, work);
                        cpumask_set_cpu(cpu, &has_work);
                }
        }
index 0d457e7db8d6df5e3bd185ac2b2333830bccf7b4..c99463ac02fb56d270fdc4ea7ab749e1f58fe666 100644 (file)
@@ -252,7 +252,10 @@ static inline void free_swap_cache(struct page *page)
 void free_page_and_swap_cache(struct page *page)
 {
        free_swap_cache(page);
-       put_page(page);
+       if (is_huge_zero_page(page))
+               put_huge_zero_page();
+       else
+               put_page(page);
 }
 
 /*
index dcea4f4c62b3b43d701fb68a74a6aa1a1e0aa8cf..c18080ad408572f53df75e18e2b56f714f784edb 100644 (file)
@@ -279,6 +279,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
         * change from under us.
         */
        list_for_each_entry(v, &vg->vlan_list, vlist) {
+               if (!br_vlan_should_use(v))
+                       continue;
                f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
                if (f && f->is_local && !f->dst)
                        fdb_delete_local(br, NULL, f);
index 5cfd26a0006f07d15ee62a5660e0177745f454cf..1cd2ec046164a659d7e4ad1b1acc24eceb780a6e 100644 (file)
@@ -309,8 +309,8 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
        __scm_destroy(scm);
 }
 
-static int do_set_attach_filter(struct socket *sock, int level, int optname,
-                               char __user *optval, unsigned int optlen)
+/* allocate a 64-bit sock_fprog on the user stack for the duration of the syscall. */
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
 {
        struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
        struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@@ -323,6 +323,19 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
            __get_user(ptr, &fprog32->filter) ||
            __put_user(len, &kfprog->len) ||
            __put_user(compat_ptr(ptr), &kfprog->filter))
+               return NULL;
+
+       return kfprog;
+}
+EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
+
+static int do_set_attach_filter(struct socket *sock, int level, int optname,
+                               char __user *optval, unsigned int optlen)
+{
+       struct sock_fprog __user *kfprog;
+
+       kfprog = get_compat_bpf_fprog(optval);
+       if (!kfprog)
                return -EFAULT;
 
        return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
@@ -354,7 +367,8 @@ static int do_set_sock_timeout(struct socket *sock, int level,
 static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, unsigned int optlen)
 {
-       if (optname == SO_ATTACH_FILTER)
+       if (optname == SO_ATTACH_FILTER ||
+           optname == SO_ATTACH_REUSEPORT_CBPF)
                return do_set_attach_filter(sock, level, optname,
                                            optval, optlen);
        if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
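
get_compat_bpf_fprog() exists because a 32-bit caller's sock_fprog carries a 32-bit filter pointer, so the kernel rebuilds a native-layout copy via compat_alloc_user_space() before the shared setsockopt path reads it. The layout mismatch it bridges, shown on a 64-bit host (the structs below are illustrative mirrors, not the kernel definitions):

    #include <stdio.h>
    #include <stdint.h>

    struct compat_sock_fprog {
            uint16_t len;
            uint32_t filter;          /* 32-bit user pointer */
    };

    struct sock_fprog_native {
            uint16_t len;
            void    *filter;          /* native 64-bit pointer plus padding */
    };

    int main(void)
    {
            printf("compat: %zu bytes, native: %zu bytes\n",
                   sizeof(struct compat_sock_fprog),
                   sizeof(struct sock_fprog_native));  /* typically 8 vs 16 */
            return 0;
    }
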
index f96ee8b9478d8a752e78e1ca39c2351f5863f054..be873e4e312572516a2ab47cdac6053efd3459af 100644 (file)
@@ -47,6 +47,7 @@ nla_put_failure:
  * @xstats_type: TLV type for backward compatibility xstats TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistic lock and appends
  * an empty TLV header to the socket buffer for use as a container for all
@@ -87,6 +88,7 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat);
  * @type: TLV type for top level statistic TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistic lock and appends
  * an empty TLV header to the socket buffer for use as a container for all
index 2b3f76fe65f49f8018c63aa44b345262fb40cde7..7a0b616557abb6570dbc86a0a7cf04a50a83214b 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/jiffies.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_net.h>
 
 #include "net-sysfs.h"
 
index d56c0559b477cb96ce78c9b9b7dacc3109594f3a..0ff31d97d48586af00c6973d123576e040ed422a 100644 (file)
@@ -1618,12 +1618,12 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       if (rcu_access_pointer(sk->sk_filter)) {
-               if (udp_lib_checksum_complete(skb))
+       if (rcu_access_pointer(sk->sk_filter) &&
+           udp_lib_checksum_complete(skb))
                        goto csum_error;
-               if (sk_filter(sk, skb))
-                       goto drop;
-       }
+
+       if (sk_filter(sk, skb))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
index f4ac2842d4d9543d2ac1e7d4b91b99df32d43e6d..fdc9de276ab1aeecfbc917e8718953d0aa0691ee 100644 (file)
@@ -1256,6 +1256,8 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (ret)
                return ret;
 
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
        tunnel = netdev_priv(dev);
 
        ip6gre_tnl_link_config(tunnel, 1);
@@ -1289,6 +1291,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
 
        dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 }
 
 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
index cbf127ae7c676650cc626cbf12cd61b6b570ea43..635b8d340cdbbbce62a173ed4016b6c5c81fc6b9 100644 (file)
@@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                         const struct in6_addr *final_dst)
 {
        struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
-       int err;
 
        dst = ip6_sk_dst_check(sk, dst, fl6);
+       if (!dst)
+               dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
 
-       err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
-       if (err)
-               return ERR_PTR(err);
-       if (final_dst)
-               fl6->daddr = *final_dst;
-
-       return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+       return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
index 6989c70ae29f1541e0216d36d58ee142934a83c2..4a84b5ad9ecbb74b29ef509d00fbda60868833cd 100644 (file)
@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
        fl6.daddr = *gw;
        fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
                        (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+       fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error) {
                dst_release(dst);
index 79e33e02f11accfd5a6c8cf2751df0a8793ab016..f36c2d076fce0df30d202d7683a67e3614d77fe9 100644 (file)
@@ -1721,7 +1721,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
        destp = ntohs(inet->inet_dport);
        srcp  = ntohs(inet->inet_sport);
 
-       if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
index 2da1896af93496cc58762c67b4cd4b4f42924901..f421c9f23c5bef7bf58937635474713cf4722516 100644 (file)
@@ -653,12 +653,12 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       if (rcu_access_pointer(sk->sk_filter)) {
-               if (udp_lib_checksum_complete(skb))
-                       goto csum_error;
-               if (sk_filter(sk, skb))
-                       goto drop;
-       }
+       if (rcu_access_pointer(sk->sk_filter) &&
+           udp_lib_checksum_complete(skb))
+               goto csum_error;
+
+       if (sk_filter(sk, skb))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
index 6edfa99803148815e383eb13ce6a9c1eb098058d..1e40dacaa137f00df3df79c0faf4eb4de65e86c2 100644 (file)
@@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
        tunnel->encap = encap;
        if (encap == L2TP_ENCAPTYPE_UDP) {
-               struct udp_tunnel_sock_cfg udp_cfg;
+               struct udp_tunnel_sock_cfg udp_cfg = { };
 
                udp_cfg.sk_user_data = tunnel;
                udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
index 4c6404e1ad6e683da649f507829c19b232fcd417..21b1fdf5d01d50d751ac6c3cf86670ecf98850be 100644 (file)
@@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
                del_timer_sync(&sta->mesh->plink_timer);
        }
 
+       /* make sure no readers can access nexthop sta from here on */
+       mesh_path_flush_by_nexthop(sta);
+       synchronize_net();
+
        if (changed)
                ieee80211_mbss_info_change_notify(sdata, changed);
 }
index c8b8ccc370eb70c75ab43a8e013e60c1f280debd..78b0ef32ddddd6768377bf49da5acdfabf7e762f 100644 (file)
@@ -280,7 +280,7 @@ struct ieee80211_fast_tx {
        u8 sa_offs, da_offs, pn_offs;
        u8 band;
        u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
-              sizeof(rfc1042_header)];
+              sizeof(rfc1042_header)] __aligned(2);
 
        struct rcu_head rcu_head;
 };
index 2cb3c626cd4307a51e06b9f0bf5c7b92cdb961a9..096a45103f14cbbaccc07b7574b9fcfc7e55ff92 100644 (file)
@@ -762,7 +762,7 @@ static int expire_quiescent_template(struct netns_ipvs *ipvs,
  *     If available, return 1, otherwise invalidate this connection
  *     template and return 0.
  */
-int ip_vs_check_template(struct ip_vs_conn *ct)
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
 {
        struct ip_vs_dest *dest = ct->dest;
        struct netns_ipvs *ipvs = ct->ipvs;
@@ -772,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
         */
        if ((dest == NULL) ||
            !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
-           expire_quiescent_template(ipvs, dest)) {
+           expire_quiescent_template(ipvs, dest) ||
+           (cdest && (dest != cdest))) {
                IP_VS_DBG_BUF(9, "check_template: dest not available for "
                              "protocol %s s:%s:%d v:%s:%d "
                              "-> d:%s:%d\n",
index 1207f20d24e4a25050363e2d6b9e30cc6aaf03b5..2c1b498a7a271df0f6e3ffa86407b89ba103e9d9 100644 (file)
@@ -321,7 +321,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 
        /* Check if a template already exists */
        ct = ip_vs_ct_in_get(&param);
-       if (!ct || !ip_vs_check_template(ct)) {
+       if (!ct || !ip_vs_check_template(ct, NULL)) {
                struct ip_vs_scheduler *sched;
 
                /*
@@ -1154,7 +1154,8 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
                                                  vport, &param) < 0)
                        return NULL;
                ct = ip_vs_ct_in_get(&param);
-               if (!ct) {
+               /* check if template exists and points to the same dest */
+               if (!ct || !ip_vs_check_template(ct, dest)) {
                        ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
                                            IP_VS_CONN_F_TEMPLATE, dest, 0);
                        if (!ct) {
index 883c691ec8d05724d8a63734bba9a3a30ae06eb2..19efeba02abb0e8db4d4b9b0f7827d2d3808c69b 100644 (file)
@@ -632,6 +632,7 @@ static int __init nf_conntrack_ftp_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %d port: %d\n",
                                       ftp[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_ftp_fini();
                                return ret;
                        }
index f703adb7e5f7166ca47380aedd8ad0074e361ee4..196cb39649e1ec186108acfc417ae2d7bf8a0bb2 100644 (file)
@@ -361,9 +361,10 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_log);
 
 int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 {
-       int ret = 0;
-       struct nf_conntrack_helper *cur;
+       struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
        unsigned int h = helper_hash(&me->tuple);
+       struct nf_conntrack_helper *cur;
+       int ret = 0;
 
        BUG_ON(me->expect_policy == NULL);
        BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
@@ -371,9 +372,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 
        mutex_lock(&nf_ct_helper_mutex);
        hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
-               if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
-                   cur->tuple.src.l3num == me->tuple.src.l3num &&
-                   cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+               if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
                        ret = -EEXIST;
                        goto out;
                }
index 8b6da27196005f61d2269d8af92124096c85047e..f97ac61d2536aac5d0c73d6e86b1a6df36bc60fd 100644 (file)
@@ -271,6 +271,7 @@ static int __init nf_conntrack_irc_init(void)
                if (ret) {
                        pr_err("failed to register helper for pf: %u port: %u\n",
                               irc[i].tuple.src.l3num, ports[i]);
+                       ports_c = i;
                        nf_conntrack_irc_fini();
                        return ret;
                }
index 7523a575f6d16ded0c5724156cfe280598c5c505..3fcbaab83b3d34c728c2e55f55746156e78d2d89 100644 (file)
@@ -223,6 +223,7 @@ static int __init nf_conntrack_sane_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %d port: %d\n",
                                       sane[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_sane_fini();
                                return ret;
                        }
index 3e06402739e0acfd946fc8de04116aef0feea4ad..f72ba5587588fdaabb02232cbb33a0500ee2d422 100644 (file)
@@ -1669,6 +1669,7 @@ static int __init nf_conntrack_sip_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %u port: %u\n",
                                       sip[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_sip_fini();
                                return ret;
                        }
index f87e84ebcec3ac78cc4b941ec8828fab8c623f55..c026c472ea80d8dae3b57e6486c62d56903bca4e 100644 (file)
@@ -487,8 +487,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
        { }
 };
 
-#define NET_NF_CONNTRACK_MAX 2089
-
 static struct ctl_table nf_ct_netfilter_table[] = {
        {
                .procname       = "nf_conntrack_max",
index 36f9640664615bee6a640e11f4f18d1459952a81..2e65b5430fba6d09fa060aef44099360c033096c 100644 (file)
@@ -142,6 +142,7 @@ static int __init nf_conntrack_tftp_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %u port: %u\n",
                                       tftp[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_tftp_fini();
                                return ret;
                        }
index 5baa8e24e6ac1b512250c676cf0caf115f0b17f9..b19ad20a705ca7845ad21a5d272accba190f3f38 100644 (file)
  * Once the queue is registered it must reinject all packets it
  * receives, no matter what.
  */
-static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
 
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
-void nf_register_queue_handler(const struct nf_queue_handler *qh)
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
 {
        /* should never happen, we only have one queueing backend in kernel */
-       WARN_ON(rcu_access_pointer(queue_handler));
-       rcu_assign_pointer(queue_handler, qh);
+       WARN_ON(rcu_access_pointer(net->nf.queue_handler));
+       rcu_assign_pointer(net->nf.queue_handler, qh);
 }
 EXPORT_SYMBOL(nf_register_queue_handler);
 
 /* The caller must flush their queue before this */
-void nf_unregister_queue_handler(void)
+void nf_unregister_queue_handler(struct net *net)
 {
-       RCU_INIT_POINTER(queue_handler, NULL);
-       synchronize_rcu();
+       RCU_INIT_POINTER(net->nf.queue_handler, NULL);
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);
 
@@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
        const struct nf_queue_handler *qh;
 
        rcu_read_lock();
-       qh = rcu_dereference(queue_handler);
+       qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net, ops);
        rcu_read_unlock();
@@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb,
        struct nf_queue_entry *entry = NULL;
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;
+       struct net *net = state->net;
 
        /* QUEUE == DROP if no one is waiting, to be safe. */
-       qh = rcu_dereference(queue_handler);
+       qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
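
Moving queue_handler into struct netns_nf gives every network namespace its own RCU-published slot; registration is effectively a release-store and lookup an RCU dereference under rcu_read_lock(). A loose userspace analogue using C11 atomics (real RCU also needs a grace period, which the exit_batch hook below supplies via synchronize_rcu()):

    #include <stdatomic.h>
    #include <stdio.h>

    struct handler { const char *name; };

    struct netns { _Atomic(struct handler *) queue_handler; };  /* per-namespace slot */

    static void register_handler(struct netns *net, struct handler *h)
    {
            /* rcu_assign_pointer() analogue: publish with release semantics */
            atomic_store_explicit(&net->queue_handler, h, memory_order_release);
    }

    static struct handler *lookup_handler(struct netns *net)
    {
            /* rcu_dereference() analogue */
            return atomic_load_explicit(&net->queue_handler, memory_order_consume);
    }

    int main(void)
    {
            struct netns net = { NULL };
            struct handler nfqh = { "nfnetlink_queue" };

            register_handler(&net, &nfqh);
            struct handler *qh = lookup_handler(&net);
            printf("handler: %s\n", qh ? qh->name : "(none)");
            return 0;
    }
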
index 4d292b933b5c5c83d205f844f1a876ebefca708c..7b7aa871a174fa2a1fd1e002fe0838231035e305 100644 (file)
@@ -2647,6 +2647,8 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
        /* Only accept unspec with dump */
        if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
                return -EAFNOSUPPORT;
+       if (!nla[NFTA_SET_TABLE])
+               return -EINVAL;
 
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
        if (IS_ERR(set))
index aa93877ab6e2629bc31791135ace6ef474abd35e..5d36a0926b4a4859304fdd1808b428c87a2ee8c4 100644 (file)
@@ -557,7 +557,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 
        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
-               struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
+               struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 
                ts.sec = cpu_to_be64(kts.tv_sec);
                ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
@@ -1482,21 +1482,29 @@ static int __net_init nfnl_queue_net_init(struct net *net)
                         net->nf.proc_netfilter, &nfqnl_file_ops))
                return -ENOMEM;
 #endif
+       nf_register_queue_handler(net, &nfqh);
        return 0;
 }
 
 static void __net_exit nfnl_queue_net_exit(struct net *net)
 {
+       nf_unregister_queue_handler(net);
 #ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
 #endif
 }
 
+static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
+{
+       synchronize_rcu();
+}
+
 static struct pernet_operations nfnl_queue_net_ops = {
-       .init   = nfnl_queue_net_init,
-       .exit   = nfnl_queue_net_exit,
-       .id     = &nfnl_queue_net_id,
-       .size   = sizeof(struct nfnl_queue_net),
+       .init           = nfnl_queue_net_init,
+       .exit           = nfnl_queue_net_exit,
+       .exit_batch     = nfnl_queue_net_exit_batch,
+       .id             = &nfnl_queue_net_id,
+       .size           = sizeof(struct nfnl_queue_net),
 };
 
 static int __init nfnetlink_queue_init(void)
@@ -1517,7 +1525,6 @@ static int __init nfnetlink_queue_init(void)
        }
 
        register_netdevice_notifier(&nfqnl_dev_notifier);
-       nf_register_queue_handler(&nfqh);
        return status;
 
 cleanup_netlink_notifier:
@@ -1529,7 +1536,6 @@ out:
 
 static void __exit nfnetlink_queue_fini(void)
 {
-       nf_unregister_queue_handler();
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
index c69c892231d7b6b8699f20913847f64f1a758a42..2675d580c490b3de07562ce9174a0b4f69d55035 100644 (file)
@@ -612,7 +612,7 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems,
                return -EINVAL;
 
        if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-           target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
+           COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
                return -EINVAL;
 
        /* compat_xt_entry match has less strict alignment requirements,
@@ -694,7 +694,7 @@ int xt_check_entry_offsets(const void *base,
                return -EINVAL;
 
        if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-           target_offset + sizeof(struct xt_standard_target) != next_offset)
+           XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
                return -EINVAL;
 
        return xt_check_entry_match(elems, base + target_offset,
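
Both hunks compare the aligned end of a standard target against next_offset, because entries are laid out with their target padded to XT_ALIGN; the unaligned sizeof comparison could reject legitimately padded entries. The rounding at work (alignment and sizes are illustrative):

    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))  /* what XT_ALIGN does; a is a power of two */

    int main(void)
    {
            unsigned target_offset = 112, target_size = 36, align = 8;
            unsigned next_offset = target_offset + ALIGN_UP(target_size, align); /* entry layout */

            /* pre-fix check: 148 != 152, a valid entry would be rejected */
            printf("unaligned: %u vs next_offset %u\n", target_offset + target_size, next_offset);
            /* post-fix check: 152 == 152 */
            printf("aligned:   %u vs next_offset %u\n",
                   ALIGN_UP(target_offset + target_size, align), next_offset);
            return 0;
    }
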
index 4040eb92d9c9dc3b1ce0ba41227b6b1e9bd32eff..9bff6ef16fa7632fcfc05f23dd696d75ead6d5e8 100644 (file)
@@ -93,6 +93,7 @@
 #include <net/inet_common.h>
 #endif
 #include <linux/bpf.h>
+#include <net/compat.h>
 
 #include "internal.h"
 
@@ -3940,6 +3941,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 }
 
 
+#ifdef CONFIG_COMPAT
+static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
+                                   char __user *optval, unsigned int optlen)
+{
+       struct packet_sock *po = pkt_sk(sock->sk);
+
+       if (level != SOL_PACKET)
+               return -ENOPROTOOPT;
+
+       if (optname == PACKET_FANOUT_DATA &&
+           po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
+               optval = (char __user *)get_compat_bpf_fprog(optval);
+               if (!optval)
+                       return -EFAULT;
+               optlen = sizeof(struct sock_fprog);
+       }
+
+       return packet_setsockopt(sock, level, optname, optval, optlen);
+}
+#endif
+
 static int packet_notifier(struct notifier_block *this,
                           unsigned long msg, void *ptr)
 {
@@ -4416,6 +4438,9 @@ static const struct proto_ops packet_ops = {
        .shutdown =     sock_no_shutdown,
        .setsockopt =   packet_setsockopt,
        .getsockopt =   packet_getsockopt,
+#ifdef CONFIG_COMPAT
+       .compat_setsockopt = compat_packet_setsockopt,
+#endif
        .sendmsg =      packet_sendmsg,
        .recvmsg =      packet_recvmsg,
        .mmap =         packet_mmap,
index 80256b08eac0388d702f5981f00703d2118f5cce..387df5f32e49b9084363dc3da3ff62aa252e5507 100644 (file)
@@ -74,6 +74,7 @@ enum {
        RDS_CONN_CONNECTING,
        RDS_CONN_DISCONNECTING,
        RDS_CONN_UP,
+       RDS_CONN_RESETTING,
        RDS_CONN_ERROR,
 };
 
@@ -813,6 +814,7 @@ void rds_connect_worker(struct work_struct *);
 void rds_shutdown_worker(struct work_struct *);
 void rds_send_worker(struct work_struct *);
 void rds_recv_worker(struct work_struct *);
+void rds_connect_path_complete(struct rds_connection *conn, int curr);
 void rds_connect_complete(struct rds_connection *conn);
 
 /* transport.c */
index c0be1ecd11c99ce57360687fa2b34f312c0cf0d6..8413f6c99e13519d7cbfa1ec6ce3877a0059caa7 100644 (file)
@@ -561,5 +561,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
                minfo.fport = inc->i_hdr.h_dport;
        }
 
+       minfo.flags = 0;
+
        rds_info_copy(iter, &minfo, sizeof(minfo));
 }
index c9cdb358ea885e3e356cc675b579f1313ed94ff9..b1962f8e30f7bfc9fa77fb9bb413c42c81018e57 100644 (file)
@@ -99,6 +99,7 @@ void rds_send_reset(struct rds_connection *conn)
        list_splice_init(&conn->c_retrans, &conn->c_send_queue);
        spin_unlock_irqrestore(&conn->c_lock, flags);
 }
+EXPORT_SYMBOL_GPL(rds_send_reset);
 
 static int acquire_in_xmit(struct rds_connection *conn)
 {
index 86187dad14403100ff6199ee79beed5262de818f..74ee126a6fe6c00ce8bce45053ec2b8805fc222e 100644 (file)
@@ -126,9 +126,81 @@ void rds_tcp_restore_callbacks(struct socket *sock,
 }
 
 /*
- * This is the only path that sets tc->t_sock.  Send and receive trust that
- * it is set.  The RDS_CONN_UP bit protects those paths from being
- * called while it isn't set.
+ * rds_tcp_reset_callbacks() switches the conn to the new sock and
+ * releases the existing tc->t_sock.
+ *
+ * The only functions that set tc->t_sock are rds_tcp_set_callbacks
+ * and rds_tcp_reset_callbacks.  Send and receive trust that
+ * it is set.  The absence of the RDS_CONN_UP bit protects those paths
+ * from being called while it isn't set.
+ */
+void rds_tcp_reset_callbacks(struct socket *sock,
+                            struct rds_connection *conn)
+{
+       struct rds_tcp_connection *tc = conn->c_transport_data;
+       struct socket *osock = tc->t_sock;
+
+       if (!osock)
+               goto newsock;
+
+       /* Need to resolve a duelling SYN between peers.
+        * We have an outstanding SYN to this peer, which may
+        * potentially have transitioned to the RDS_CONN_UP state,
+        * so we must quiesce any send threads before resetting
+        * c_transport_data. We quiesce these threads by setting
+        * c_state to something other than RDS_CONN_UP, and then
+        * waiting for any existing threads in rds_send_xmit to
+        * complete release_in_xmit(). (Subsequent threads entering
+        * rds_send_xmit() will bail on !rds_conn_up().)
+        *
+        * However an incoming syn-ack at this point would end up
+        * marking the conn as RDS_CONN_UP, and would again permit
+        * rds_send_xmit() threads through, so ideally we would
+        * synchronize on RDS_CONN_UP after lock_sock(), but cannot
+        * do that: waiting on !RDS_IN_XMIT after lock_sock() may
+        * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
+        * would not get set. As a result, we set c_state to
+        * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
+        * cannot mark the conn RDS_CONN_UP in the window before lock_sock().
+        */
+       atomic_set(&conn->c_state, RDS_CONN_RESETTING);
+       wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
+       lock_sock(osock->sk);
+       /* reset receive side state for rds_tcp_data_recv() for osock  */
+       if (tc->t_tinc) {
+               rds_inc_put(&tc->t_tinc->ti_inc);
+               tc->t_tinc = NULL;
+       }
+       tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+       tc->t_tinc_data_rem = 0;
+       tc->t_sock = NULL;
+
+       write_lock_bh(&osock->sk->sk_callback_lock);
+
+       osock->sk->sk_user_data = NULL;
+       osock->sk->sk_data_ready = tc->t_orig_data_ready;
+       osock->sk->sk_write_space = tc->t_orig_write_space;
+       osock->sk->sk_state_change = tc->t_orig_state_change;
+       write_unlock_bh(&osock->sk->sk_callback_lock);
+       release_sock(osock->sk);
+       sock_release(osock);
+newsock:
+       rds_send_reset(conn);
+       lock_sock(sock->sk);
+       write_lock_bh(&sock->sk->sk_callback_lock);
+       tc->t_sock = sock;
+       sock->sk->sk_user_data = conn;
+       sock->sk->sk_data_ready = rds_tcp_data_ready;
+       sock->sk->sk_write_space = rds_tcp_write_space;
+       sock->sk->sk_state_change = rds_tcp_state_change;
+
+       write_unlock_bh(&sock->sk->sk_callback_lock);
+       release_sock(sock->sk);
+}
+
+/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
+ * above rds_tcp_reset_callbacks for notes about synchronization
+ * with the data path.
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
 {
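The quiesce handshake in rds_tcp_reset_callbacks() only works because of how the send path takes and drops RDS_IN_XMIT. A condensed sketch of that counterpart, simplified from net/rds/send.c of this era (not the full rds_send_xmit()):

	static void release_in_xmit(struct rds_connection *conn)
	{
		clear_bit(RDS_IN_XMIT, &conn->c_flags);
		smp_mb__after_atomic();
		/* this wake-up pairs with the wait_event() in rds_tcp_reset_callbacks() */
		if (waitqueue_active(&conn->c_waitq))
			wake_up_all(&conn->c_waitq);
	}

	/* sender side, condensed: once c_state leaves RDS_CONN_UP, a transmitter
	 * that races in drops RDS_IN_XMIT again and wakes the resetting thread */
	int rds_send_xmit_sketch(struct rds_connection *conn)
	{
		if (!acquire_in_xmit(conn))
			return -ENOMEM;		/* another CPU is transmitting */
		if (!rds_conn_up(conn)) {	/* RDS_CONN_RESETTING parks us here */
			release_in_xmit(conn);
			return 0;
		}
		/* ... transmit queued messages ... */
		release_in_xmit(conn);
		return 0;
	}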
index 41c228300525c029df02472b609ada1a71cea98e..ec0602b0dc24b443c40437d5697037082d270f44 100644 (file)
@@ -50,6 +50,7 @@ struct rds_tcp_statistics {
 void rds_tcp_tune(struct socket *sock);
 void rds_tcp_nonagle(struct socket *sock);
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
+void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
 void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
index fb82e0a0bf89361e005a23c371f0e95dfa4970bc..fba13d0305fb25f12c476e6014a2a9d4c5559075 100644 (file)
@@ -60,7 +60,7 @@ void rds_tcp_state_change(struct sock *sk)
                case TCP_SYN_RECV:
                        break;
                case TCP_ESTABLISHED:
-                       rds_connect_complete(conn);
+                       rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
                        break;
                case TCP_CLOSE_WAIT:
                case TCP_CLOSE:
index 4bf4befe5066a0084a7430cc39f9fd0c08a9a20e..686b1d03a55858aeb59d9863339c2c8e99e03d90 100644 (file)
@@ -78,7 +78,6 @@ int rds_tcp_accept_one(struct socket *sock)
        struct inet_sock *inet;
        struct rds_tcp_connection *rs_tcp = NULL;
        int conn_state;
-       struct sock *nsk;
 
        if (!sock) /* module unload or netns delete in progress */
                return -ENETUNREACH;
@@ -136,26 +135,21 @@ int rds_tcp_accept_one(struct socket *sock)
                    !conn->c_outgoing) {
                        goto rst_nsk;
                } else {
-                       atomic_set(&conn->c_state, RDS_CONN_CONNECTING);
-                       wait_event(conn->c_waitq,
-                                  !test_bit(RDS_IN_XMIT, &conn->c_flags));
-                       rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+                       rds_tcp_reset_callbacks(new_sock, conn);
                        conn->c_outgoing = 0;
+                       /* rds_connect_path_complete() marks RDS_CONN_UP */
+                       rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING);
                }
+       } else {
+               rds_tcp_set_callbacks(new_sock, conn);
+               rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
        }
-       rds_tcp_set_callbacks(new_sock, conn);
-       rds_connect_complete(conn); /* marks RDS_CONN_UP */
        new_sock = NULL;
        ret = 0;
        goto out;
 rst_nsk:
        /* reset the newly returned accept sock and bail */
-       nsk = new_sock->sk;
-       rds_tcp_stats_inc(s_tcp_listen_closed_stale);
-       nsk->sk_user_data = NULL;
-       nsk->sk_prot->disconnect(nsk, 0);
-       tcp_done(nsk);
-       new_sock = NULL;
+       kernel_sock_shutdown(new_sock, SHUT_RDWR);
        ret = 0;
 out:
        if (rs_tcp)
index 454aa6d23327b2a302bcfc72b6b3d857656e33ed..4a323045719b5c0aa0407916a772fe65aa900b0b 100644 (file)
@@ -71,9 +71,9 @@
 struct workqueue_struct *rds_wq;
 EXPORT_SYMBOL_GPL(rds_wq);
 
-void rds_connect_complete(struct rds_connection *conn)
+void rds_connect_path_complete(struct rds_connection *conn, int curr)
 {
-       if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
+       if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
                printk(KERN_WARNING "%s: Cannot transition to state UP, "
                                "current state is %d\n",
                                __func__,
@@ -90,6 +90,12 @@ void rds_connect_complete(struct rds_connection *conn)
        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
        queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 }
+EXPORT_SYMBOL_GPL(rds_connect_path_complete);
+
+void rds_connect_complete(struct rds_connection *conn)
+{
+       rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+}
 EXPORT_SYMBOL_GPL(rds_connect_complete);
 
 /*
index 6b726a046a7d47995ecc281f67979073fe02c102..bab56ed649ba034847375de135b7b17357f0e532 100644 (file)
@@ -1162,9 +1162,7 @@ static int rxkad_init(void)
        /* pin the cipher we need so that the crypto layer doesn't invoke
         * keventd to go get it */
        rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(rxkad_ci))
-               return PTR_ERR(rxkad_ci);
-       return 0;
+       return PTR_ERR_OR_ZERO(rxkad_ci);
 }
 
 /*
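The rxkad change is a pure idiom swap with no behaviour change. PTR_ERR_OR_ZERO() comes from include/linux/err.h and is essentially:

	static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		else
			return 0;
	}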
index b884dae692a162fccf5204f1dad27734748a69c2..c557789765dc6fed833a27b7d0353e3bda0a477f 100644 (file)
@@ -38,7 +38,7 @@ struct tcf_police {
        bool                    peak_present;
 };
 #define to_police(pc)  \
-       container_of(pc, struct tcf_police, common)
+       container_of(pc->priv, struct tcf_police, common)
 
 #define POL_TAB_MASK     15
 
@@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                                 struct nlattr *est, struct tc_action *a,
                                 int ovr, int bind)
 {
-       unsigned int h;
        int ret = 0, err;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
        struct tc_police *parm;
        struct tcf_police *police;
        struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
        struct tc_action_net *tn = net_generic(net, police_net_id);
-       struct tcf_hashinfo *hinfo = tn->hinfo;
        int size;
 
        if (nla == NULL)
@@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 
        if (parm->index) {
                if (tcf_hash_search(tn, a, parm->index)) {
-                       police = to_police(a->priv);
+                       police = to_police(a);
                        if (bind) {
                                police->tcf_bindcnt += 1;
                                police->tcf_refcnt += 1;
@@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                        /* not replacing */
                        return -EEXIST;
                }
+       } else {
+               ret = tcf_hash_create(tn, parm->index, NULL, a,
+                                     sizeof(*police), bind, false);
+               if (ret)
+                       return ret;
+               ret = ACT_P_CREATED;
        }
 
-       police = kzalloc(sizeof(*police), GFP_KERNEL);
-       if (police == NULL)
-               return -ENOMEM;
-       ret = ACT_P_CREATED;
-       police->tcf_refcnt = 1;
-       spin_lock_init(&police->tcf_lock);
-       if (bind)
-               police->tcf_bindcnt = 1;
+       police = to_police(a);
 override:
        if (parm->rate.rate) {
                err = -ENOMEM;
@@ -237,16 +234,8 @@ override:
                return ret;
 
        police->tcfp_t_c = ktime_get_ns();
-       police->tcf_index = parm->index ? parm->index :
-               tcf_hash_new_index(tn);
-       police->tcf_tm.install = jiffies;
-       police->tcf_tm.lastuse = jiffies;
-       h = tcf_hash(police->tcf_index, POL_TAB_MASK);
-       spin_lock_bh(&hinfo->lock);
-       hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
-       spin_unlock_bh(&hinfo->lock);
+       tcf_hash_insert(tn, a);
 
-       a->priv = police;
        return ret;
 
 failure_unlock:
@@ -255,7 +244,7 @@ failure:
        qdisc_put_rtab(P_tab);
        qdisc_put_rtab(R_tab);
        if (ret == ACT_P_CREATED)
-               kfree(police);
+               tcf_hash_cleanup(a, est);
        return err;
 }
 
index 730aacafc22d8638ccebce757cc5b6f8968bd39f..b3b7978f418214104d5d360aa4ea6154b36fc038 100644 (file)
@@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
        struct tc_cls_flower_offload offload = {0};
        struct tc_to_netdev tc;
 
-       if (!tc_should_offload(dev, 0))
+       if (!tc_should_offload(dev, tp, 0))
                return;
 
        offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
        struct tc_cls_flower_offload offload = {0};
        struct tc_to_netdev tc;
 
-       if (!tc_should_offload(dev, flags))
+       if (!tc_should_offload(dev, tp, flags))
                return;
 
        offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
        struct tc_cls_flower_offload offload = {0};
        struct tc_to_netdev tc;
 
-       if (!tc_should_offload(dev, 0))
+       if (!tc_should_offload(dev, tp, 0))
                return;
 
        offload.command = TC_CLSFLOWER_STATS;
index 079b43b3c5d24dc5eaf7bc5e7b7d0d4d0c672ca2..ffe593efe930680d4ec1a92b619256f12cb9ecda 100644 (file)
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, 0)) {
+       if (tc_should_offload(dev, tp, 0)) {
                offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
                offload.cls_u32->knode.handle = handle;
                dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
        struct tc_to_netdev offload;
        int err;
 
+       if (!tc_should_offload(dev, tp, flags))
+               return tc_skip_sw(flags) ? -EINVAL : 0;
+
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, flags)) {
-               offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
-               offload.cls_u32->hnode.divisor = h->divisor;
-               offload.cls_u32->hnode.handle = h->handle;
-               offload.cls_u32->hnode.prio = h->prio;
+       offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+       offload.cls_u32->hnode.divisor = h->divisor;
+       offload.cls_u32->hnode.handle = h->handle;
+       offload.cls_u32->hnode.prio = h->prio;
 
-               err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                                   tp->protocol, &offload);
-               if (tc_skip_sw(flags))
-                       return err;
-       }
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->protocol, &offload);
+       if (tc_skip_sw(flags))
+               return err;
 
        return 0;
 }
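Hoisting the tc_should_offload() test to the top of u32_replace_hw_hnode() (and of u32_replace_hw_knode() below) makes the hardware/software decision explicit. The behaviour the early return encodes:

	/*
	 * tc_should_offload()   tc_skip_sw(flags)   result
	 * -------------------   -----------------   ------------------------------
	 * false                 true                -EINVAL (nowhere left to run)
	 * false                 false               0: software classifier only
	 * true                  true                hardware only; ndo error fatal
	 * true                  false               hardware, software as backup
	 */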
@@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, 0)) {
+       if (tc_should_offload(dev, tp, 0)) {
                offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
                offload.cls_u32->hnode.divisor = h->divisor;
                offload.cls_u32->hnode.handle = h->handle;
@@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, flags)) {
-               offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
-               offload.cls_u32->knode.handle = n->handle;
-               offload.cls_u32->knode.fshift = n->fshift;
+       if (!tc_should_offload(dev, tp, flags))
+               return tc_skip_sw(flags) ? -EINVAL : 0;
+
+       offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+       offload.cls_u32->knode.handle = n->handle;
+       offload.cls_u32->knode.fshift = n->fshift;
 #ifdef CONFIG_CLS_U32_MARK
-               offload.cls_u32->knode.val = n->val;
-               offload.cls_u32->knode.mask = n->mask;
+       offload.cls_u32->knode.val = n->val;
+       offload.cls_u32->knode.mask = n->mask;
 #else
-               offload.cls_u32->knode.val = 0;
-               offload.cls_u32->knode.mask = 0;
+       offload.cls_u32->knode.val = 0;
+       offload.cls_u32->knode.mask = 0;
 #endif
-               offload.cls_u32->knode.sel = &n->sel;
-               offload.cls_u32->knode.exts = &n->exts;
-               if (n->ht_down)
-                       offload.cls_u32->knode.link_handle = n->ht_down->handle;
-
-               err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                                   tp->protocol, &offload);
-               if (tc_skip_sw(flags))
-                       return err;
-       }
+       offload.cls_u32->knode.sel = &n->sel;
+       offload.cls_u32->knode.exts = &n->exts;
+       if (n->ht_down)
+               offload.cls_u32->knode.link_handle = n->ht_down->handle;
+
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->protocol, &offload);
+       if (tc_skip_sw(flags))
+               return err;
 
        return 0;
 }
@@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        if (tb[TCA_U32_FLAGS]) {
                flags = nla_get_u32(tb[TCA_U32_FLAGS]);
                if (!tc_flags_valid(flags))
-                       return err;
+                       return -EINVAL;
        }
 
        n = (struct tc_u_knode *)*arg;
@@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                ht->divisor = divisor;
                ht->handle = handle;
                ht->prio = tp->prio;
+
+               err = u32_replace_hw_hnode(tp, ht, flags);
+               if (err) {
+                       kfree(ht);
+                       return err;
+               }
+
                RCU_INIT_POINTER(ht->next, tp_c->hlist);
                rcu_assign_pointer(tp_c->hlist, ht);
                *arg = (unsigned long)ht;
 
-               u32_replace_hw_hnode(tp, ht, flags);
                return 0;
        }
 
index a63e879e89758fe954ab778fe776ddf6fc4e3536..bf8af2c43c2ce8b988b26f58d04074d367f987a6 100644 (file)
@@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                cl->deficit = cl->quantum;
        }
 
+       qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return err;
 }
@@ -407,6 +408,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 
                        bstats_update(&cl->bstats, skb);
                        qdisc_bstats_update(sch, skb);
+                       qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }
@@ -428,6 +430,7 @@ static unsigned int drr_drop(struct Qdisc *sch)
                if (cl->qdisc->ops->drop) {
                        len = cl->qdisc->ops->drop(cl->qdisc);
                        if (len > 0) {
+                               sch->qstats.backlog -= len;
                                sch->q.qlen--;
                                if (cl->qdisc->q.qlen == 0)
                                        list_del(&cl->alist);
@@ -463,6 +466,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
                        qdisc_reset(cl->qdisc);
                }
        }
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
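These drr hunks, and the matching ones in the generic, hfsc, prio, red and tbf diffs below, all restore the same invariant: every change to sch->q.qlen must be mirrored byte-for-byte in sch->qstats.backlog, or qdisc_tree_reduce_backlog() propagates wrong deltas up the tree. The helpers involved are thin wrappers; simplified from include/net/sch_generic.h:

	static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
						    const struct sk_buff *skb)
	{
		sch->qstats.backlog += qdisc_pkt_len(skb);
	}

	static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
						    const struct sk_buff *skb)
	{
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}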
 
index 6883a89715625fc7874780ef962e51dc7773a35b..da250b2e06ae3b2cc054ff1d31e29649a74f6e13 100644 (file)
@@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
+       unsigned int pkt_len;
        bool memory_limited;
 
        idx = fq_codel_classify(skb, sch, &ret);
@@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;
 
+       /* save this packet length as it might be dropped by fq_codel_drop() */
+       pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog
@@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         */
        ret = fq_codel_drop(sch, q->drop_batch_size);
 
-       q->drop_overlimit += prev_qlen - sch->q.qlen;
+       prev_qlen -= sch->q.qlen;
+       prev_backlog -= sch->qstats.backlog;
+       q->drop_overlimit += prev_qlen;
        if (memory_limited)
-               q->drop_overmemory += prev_qlen - sch->q.qlen;
-       /* As we dropped packet(s), better let upper stack know this */
-       qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
-                                 prev_backlog - sch->qstats.backlog);
+               q->drop_overmemory += prev_qlen;
 
-       return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+       /* As we dropped packet(s), better let upper stack know this.
+        * If we dropped a packet for this flow, return NET_XMIT_CN,
+        * but in this case, our parents won't increase their backlogs.
+        */
+       if (ret == idx) {
+               qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+                                         prev_backlog - pkt_len);
+               return NET_XMIT_CN;
+       }
+       qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+       return NET_XMIT_SUCCESS;
 }
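A worked example of the accounting above, with hypothetical numbers:

	/*
	 * Say fq_codel_drop() removed 3 packets, 4500 bytes in total, and one
	 * of them was the skb just enqueued (ret == idx, pkt_len == 1500).
	 * Returning NET_XMIT_CN tells parent qdiscs not to count that skb at
	 * all, so only the other packets are backed out:
	 *
	 *	qdisc_tree_reduce_backlog(sch, 3 - 1, 4500 - 1500);
	 *
	 * Had every dropped packet belonged to other flows (ret != idx), the
	 * new skb counts as queued and the full delta is backed out:
	 *
	 *	qdisc_tree_reduce_backlog(sch, 3, 4500);
	 */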
 
 /* This is the specific function called from codel_dequeue()
@@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
-       if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+       if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
index 269dd71b3828c03867c5dbbe7b041ad4babcf1f1..f9e0e9c03d0a3fba3daf7fb420297ad7e859dccf 100644 (file)
@@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
        q->gso_skb = skb;
        q->qstats.requeues++;
+       qdisc_qstats_backlog_inc(q, skb);
        q->q.qlen++;    /* it's still part of the queue */
        __netif_schedule(q);
 
@@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
+                       qdisc_qstats_backlog_dec(q, skb);
                        q->q.qlen--;
                } else
                        skb = NULL;
index d783d7cc33487f284677e107f5083588f559f742..1ac9f9f03fe31fdf8d63eccfad0c7a13939183c6 100644 (file)
@@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        qdisc_watchdog_cancel(&q->watchdog);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
 
@@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
        struct hfsc_sched *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;
-       struct hfsc_class *cl;
-       unsigned int i;
-
-       sch->qstats.backlog = 0;
-       for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
-                       sch->qstats.backlog += cl->qdisc->qstats.backlog;
-       }
 
        qopt.defcls = q->defcls;
        if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (cl->qdisc->q.qlen == 1)
                set_active(cl, qdisc_pkt_len(skb));
 
+       qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
 
        return NET_XMIT_SUCCESS;
@@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch)
 
        qdisc_unthrottled(sch);
        qdisc_bstats_update(sch, skb);
+       qdisc_qstats_backlog_dec(sch, skb);
        sch->q.qlen--;
 
        return skb;
@@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch)
                        }
                        cl->qstats.drops++;
                        qdisc_qstats_drop(sch);
+                       sch->qstats.backlog -= len;
                        sch->q.qlen--;
                        return len;
                }
index 10adbc617905e4386becd7fe272ecad5f96ee086..8fe6999b642ac3c039f40621f4ca123d129718c4 100644 (file)
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
        return TC_H_MIN(classid) + 1;
 }
 
+static bool ingress_cl_offload(u32 classid)
+{
+       return true;
+}
+
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
                                         unsigned long parent, u32 classid)
 {
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
        .put            =       ingress_put,
        .walk           =       ingress_walk,
        .tcf_chain      =       ingress_find_tcf,
+       .tcf_cl_offload =       ingress_cl_offload,
        .bind_tcf       =       ingress_bind_filter,
        .unbind_tcf     =       ingress_put,
 };
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
        }
 }
 
+static bool clsact_cl_offload(u32 classid)
+{
+       return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
+}
+
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
                                        unsigned long parent, u32 classid)
 {
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
        .put            =       ingress_put,
        .walk           =       ingress_walk,
        .tcf_chain      =       clsact_find_tcf,
+       .tcf_cl_offload =       clsact_cl_offload,
        .bind_tcf       =       clsact_bind_filter,
        .unbind_tcf     =       ingress_put,
 };
index fee1b15506b299755c685502a4a33283b458fef5..4b0a82191bc4011c9574a029fe8c30133c5f6f50 100644 (file)
@@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        ret = qdisc_enqueue(skb, qdisc);
        if (ret == NET_XMIT_SUCCESS) {
+               qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
@@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
                struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
                if (skb) {
                        qdisc_bstats_update(sch, skb);
+                       qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }
@@ -135,6 +137,7 @@ static unsigned int prio_drop(struct Qdisc *sch)
        for (prio = q->bands-1; prio >= 0; prio--) {
                qdisc = q->queues[prio];
                if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
+                       sch->qstats.backlog -= len;
                        sch->q.qlen--;
                        return len;
                }
@@ -151,6 +154,7 @@ prio_reset(struct Qdisc *sch)
 
        for (prio = 0; prio < q->bands; prio++)
                qdisc_reset(q->queues[prio]);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
 
index 8d2d8d953432e83d8523cc42923793eb852f599c..f18857febdad0adcae567beaebd2520ec9932ce6 100644 (file)
@@ -1235,8 +1235,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                         cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
                err = qfq_change_agg(sch, cl, cl->agg->class_weight,
                                     qdisc_pkt_len(skb));
-               if (err)
-                       return err;
+               if (err) {
+                       cl->qstats.drops++;
+                       return qdisc_drop(skb, sch);
+               }
        }
 
        err = qdisc_enqueue(skb, cl->qdisc);
index 8c0508c0e287742a8fbbcbf49134e76e9e52b807..91578bdd378c943e66137fd986c1df6f1674bd27 100644 (file)
@@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        ret = qdisc_enqueue(skb, child);
        if (likely(ret == NET_XMIT_SUCCESS)) {
+               qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
        } else if (net_xmit_drop_count(ret)) {
                q->stats.pdrop++;
@@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
        skb = child->dequeue(child);
        if (skb) {
                qdisc_bstats_update(sch, skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        } else {
                if (!red_is_idling(&q->vars))
@@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch)
        if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
                q->stats.other++;
                qdisc_qstats_drop(sch);
+               sch->qstats.backlog -= len;
                sch->q.qlen--;
                return len;
        }
@@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch)
        struct red_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset(q->qdisc);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
        red_restart(&q->vars);
 }
index 83b90b584fae4fcee779bdeb638df5c59f40f007..3161e491990b29254fdcb54ad3ff5d18dbf4d16f 100644 (file)
@@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return ret;
        }
 
+       qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
 }
@@ -217,6 +218,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
        unsigned int len = 0;
 
        if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+               sch->qstats.backlog -= len;
                sch->q.qlen--;
                qdisc_qstats_drop(sch);
        }
@@ -263,6 +265,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
                        q->t_c = now;
                        q->tokens = toks;
                        q->ptokens = ptoks;
+                       qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        qdisc_unthrottled(sch);
                        qdisc_bstats_update(sch, skb);
@@ -294,6 +297,7 @@ static void tbf_reset(struct Qdisc *sch)
        struct tbf_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset(q->qdisc);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
        q->t_c = ktime_get_ns();
        q->tokens = q->buffer;
index f795b1dd0ccdf3dc088e16e1e9ad3ae9cc978d16..3ad9fab1985f1cdca35c876f0f228d28b9b5db00 100644 (file)
@@ -604,7 +604,8 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 
        link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
        link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
-       strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
+       nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
+                   TIPC_MAX_LINK_NAME);
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
                            &link_info, sizeof(link_info));
index d25c82bc1bbe8e80ee0761b466e16088040c717d..ecca3896b9f7a2f18f1cdcfa2d38b2d857decf27 100644 (file)
@@ -363,8 +363,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
        WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
        WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
        WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
-       WARN_ON(ops->set_tx_power && !ops->get_tx_power);
-       WARN_ON(ops->set_antenna && !ops->get_antenna);
 
        alloc_size = sizeof(*rdev) + sizeof_priv;
 
index 6250b1cfcde58758bb480758d1c61217d37a7cd1..dbb2738e356ad83785caa748a0b84352a189bb8b 100644 (file)
@@ -958,8 +958,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
                        return private(dev, iwr, cmd, info, handler);
        }
        /* Old driver API : call driver ioctl handler */
-       if (dev->netdev_ops->ndo_do_ioctl)
-               return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+       if (dev->netdev_ops->ndo_do_ioctl) {
+#ifdef CONFIG_COMPAT
+               if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+                       int ret = 0;
+                       struct iwreq iwr_lcl;
+                       struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+
+                       memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+                       iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+                       iwr_lcl.u.data.length = iwp_compat->length;
+                       iwr_lcl.u.data.flags = iwp_compat->flags;
+
+                       ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+
+                       iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+                       iwp_compat->length = iwr_lcl.u.data.length;
+                       iwp_compat->flags = iwr_lcl.u.data.flags;
+
+                       return ret;
+               } else
+#endif
+                       return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+       }
        return -EOPNOTSUPP;
 }
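The in-place translation works because struct iw_point and its compat counterpart carry the same three fields at different widths; roughly (close to the in-tree definition, shown here for orientation):

	#ifdef CONFIG_COMPAT
	struct compat_iw_point {
		compat_caddr_t	pointer;	/* 32-bit user pointer */
		__u16		length;
		__u16		flags;
	};
	#endif

The handler copies the whole iwreq into a local, widens pointer/length/flags into native form, calls the driver, then narrows the possibly updated length and flags back through the compat layout so 32-bit callers see the results.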
 
index a9155077feefb957d38dc29b4823a9529a7ee0e9..fec75786f75bbb348d03143af7d2280015394643 100644 (file)
@@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod)
        len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
                      (*type)[0] ? *type : "*");
 
-       if (compatible[0])
+       if ((*compatible)[0])
                sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
                        *compatible);
 
index 9a0d1445ca5cf85c774a6f4e4be1d5a44770adad..94089fc71884a93e38ddbbefdad9a978d02430c6 100644 (file)
@@ -365,8 +365,11 @@ enum {
 
 #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
+#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+                       IS_KBL(pci) || IS_KBL_LP(pci))
 
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
@@ -2181,6 +2184,12 @@ static const struct pci_device_id azx_ids[] = {
        /* Sunrise Point-LP */
        { PCI_DEVICE(0x8086, 0x9d70),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Kabylake */
+       { PCI_DEVICE(0x8086, 0xa171),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Kabylake-LP */
+       { PCI_DEVICE(0x8086, 0x9d71),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
index d53c25e7a1c19473d98d001407c5321fc0578e8d..0fe18ede3e8555474a698bb71ae0cd855efc161c 100644 (file)
@@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0234:
        case 0x10ec0274:
        case 0x10ec0294:
+       case 0x10ec0700:
+       case 0x10ec0701:
+       case 0x10ec0703:
                alc_update_coef_idx(codec, 0x10, 1<<15, 0);
                break;
        case 0x10ec0662:
@@ -2655,6 +2658,7 @@ enum {
        ALC269_TYPE_ALC256,
        ALC269_TYPE_ALC225,
        ALC269_TYPE_ALC294,
+       ALC269_TYPE_ALC700,
 };
 
 /*
@@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC256:
        case ALC269_TYPE_ALC225:
        case ALC269_TYPE_ALC294:
+       case ALC269_TYPE_ALC700:
                ssids = alc269_ssids;
                break;
        default:
@@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
 static void alc_headset_mode_unplugged(struct hda_codec *codec)
 {
        static struct coef_fw coef0255[] = {
-               WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
                WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
                UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
                WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
                WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
                {}
        };
+       static struct coef_fw coef0255_1[] = {
+               WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+               {}
+       };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x1b, 0x0c0b),
                WRITE_COEF(0x45, 0xc429),
@@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
+               alc_process_coef_fw(codec, coef0255_1);
+               alc_process_coef_fw(codec, coef0255);
+               break;
        case 0x10ec0256:
+               alc_process_coef_fw(codec, coef0256);
                alc_process_coef_fw(codec, coef0255);
                break;
        case 0x10ec0233:
@@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
                WRITE_COEFEX(0x57, 0x03, 0x8ea6),
                {}
        };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
+               WRITE_COEF(0x1b, 0x0c6b),
+               WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x45, 0xd429),
                WRITE_COEF(0x1b, 0x0c2b),
@@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
-       case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0256:
+               alc_process_coef_fw(codec, coef0256);
+               break;
        case 0x10ec0233:
        case 0x10ec0283:
                alc_process_coef_fw(codec, coef0233);
@@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
                WRITE_COEFEX(0x57, 0x03, 0x8ea6),
                {}
        };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
+               WRITE_COEF(0x1b, 0x0c6b),
+               WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x45, 0xe429),
                WRITE_COEF(0x1b, 0x0c2b),
@@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
-       case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0256:
+               alc_process_coef_fw(codec, coef0256);
+               break;
        case 0x10ec0233:
        case 0x10ec0283:
                alc_process_coef_fw(codec, coef0233);
@@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
 static void alc255_set_default_jack_type(struct hda_codec *codec)
 {
        /* Set to iphone type */
-       static struct coef_fw fw[] = {
+       static struct coef_fw alc255fw[] = {
                WRITE_COEF(0x1b, 0x880b),
                WRITE_COEF(0x45, 0xd089),
                WRITE_COEF(0x1b, 0x080b),
@@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
                WRITE_COEF(0x1b, 0x0c0b),
                {}
        };
-       alc_process_coef_fw(codec, fw);
+       static struct coef_fw alc256fw[] = {
+               WRITE_COEF(0x1b, 0x884b),
+               WRITE_COEF(0x45, 0xd089),
+               WRITE_COEF(0x1b, 0x084b),
+               WRITE_COEF(0x46, 0x0004),
+               WRITE_COEF(0x1b, 0x0c4b),
+               {}
+       };
+       switch (codec->core.vendor_id) {
+       case 0x10ec0255:
+               alc_process_coef_fw(codec, alc255fw);
+               break;
+       case 0x10ec0256:
+               alc_process_coef_fw(codec, alc256fw);
+               break;
+       }
        msleep(30);
 }
 
@@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -5775,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60180},
                {0x14, 0x90170130},
                {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60180},
+               {0x14, 0x90170120},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
@@ -6053,6 +6105,14 @@ static int patch_alc269(struct hda_codec *codec)
        case 0x10ec0294:
                spec->codec_variant = ALC269_TYPE_ALC294;
                break;
+       case 0x10ec0700:
+       case 0x10ec0701:
+       case 0x10ec0703:
+               spec->codec_variant = ALC269_TYPE_ALC700;
+               spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
+               alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+               break;
+
        }
 
        if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -7008,6 +7068,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
+       HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
        HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
index bbf69d248ec56ad187f68969367b15c282fbb7fe..9f53020c32697e0aeb52de6ab8fa8d24a0ac24fa 100644 (file)
@@ -204,6 +204,44 @@ static unsigned long long adjust_signedness(unsigned long long value_int, int si
        return (value_int & value_mask) | ~value_mask;
 }
 
+static int string_set_value(struct bt_ctf_field *field, const char *string)
+{
+       char *buffer = NULL;
+       size_t len = strlen(string), i, p;
+       int err;
+
+       for (i = p = 0; i < len; i++, p++) {
+               if (isprint(string[i])) {
+                       if (!buffer)
+                               continue;
+                       buffer[p] = string[i];
+               } else {
+                       char numstr[5];
+
+                       snprintf(numstr, sizeof(numstr), "\\x%02x",
+                                (unsigned int)(string[i]) & 0xff);
+
+                       if (!buffer) {
+                               buffer = zalloc(i + (len - i) * 4 + 2);
+                               if (!buffer) {
+                                       pr_err("failed to set unprintable string '%s'\n", string);
+                                       return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
+                               }
+                               if (i > 0)
+                                       strncpy(buffer, string, i);
+                       }
+                       strncat(buffer + p, numstr, 4);
+                       p += 3;
+               }
+       }
+
+       if (!buffer)
+               return bt_ctf_field_string_set_value(field, string);
+       err = bt_ctf_field_string_set_value(field, buffer);
+       free(buffer);
+       return err;
+}
+
 static int add_tracepoint_field_value(struct ctf_writer *cw,
                                      struct bt_ctf_event_class *event_class,
                                      struct bt_ctf_event *event,
@@ -270,8 +308,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
                }
 
                if (flags & FIELD_IS_STRING)
-                       ret = bt_ctf_field_string_set_value(field,
-                                       data + offset + i * len);
+                       ret = string_set_value(field, data + offset + i * len);
                else {
                        unsigned long long value_int;
 
index f6fcc68329499f255ddc1a5012dd36fdc04d8024..9b141f12329edc750de5eefd47c3c7f2cb46e503 100644 (file)
@@ -673,6 +673,8 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
        int err;
        union perf_event *event;
 
+       if (symbol_conf.kptr_restrict)
+               return -1;
        if (map == NULL)
                return -1;
 
index 20f9cb32b703cd10330fa80ed328a16126a59a04..54c4ff2b1cee298335a9cdecfd3d07a43f51d241 100644 (file)
@@ -1933,17 +1933,17 @@ int setup_intlist(struct intlist **list, const char *list_str,
 static bool symbol__read_kptr_restrict(void)
 {
        bool value = false;
+       FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
 
-       if (geteuid() != 0) {
-               FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
-               if (fp != NULL) {
-                       char line[8];
+       if (fp != NULL) {
+               char line[8];
 
-                       if (fgets(line, sizeof(line), fp) != NULL)
-                               value = atoi(line) != 0;
+               if (fgets(line, sizeof(line), fp) != NULL)
+                       value = (geteuid() != 0) ?
+                                       (atoi(line) != 0) :
+                                       (atoi(line) == 2);
 
-                       fclose(fp);
-               }
+               fclose(fp);
        }
 
        return value;
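The rewritten helper now consults kptr_restrict for root as well as for ordinary users. The policy the new expression encodes:

	/*
	 * kptr_restrict   euid == 0 (root)     euid != 0
	 * -------------   ------------------   ------------------
	 * 0               symbols usable       symbols usable
	 * 1               symbols usable       treated as hidden
	 * 2               treated as hidden    treated as hidden
	 */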
index 96ba386b1b7bb1c4f7c5ff7d9960f65a731d2426..4a8217448f20183efc5933bb98a73607f6f34476 100644 (file)
@@ -111,9 +111,9 @@ static void attach_ebpf(int fd, uint16_t mod)
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insn_cnt = ARRAY_SIZE(prog);
-       attr.insns = (uint64_t)prog;
-       attr.license = (uint64_t)bpf_license;
-       attr.log_buf = (uint64_t)bpf_log_buf;
+       attr.insns = (unsigned long) &prog;
+       attr.license = (unsigned long) &bpf_license;
+       attr.log_buf = (unsigned long) &bpf_log_buf;
        attr.log_size = sizeof(bpf_log_buf);
        attr.log_level = 1;
        attr.kern_version = 0;
@@ -351,8 +351,8 @@ static void test_filter_no_reuseport(const struct test_params p)
        memset(&eprog, 0, sizeof(eprog));
        eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        eprog.insn_cnt = ARRAY_SIZE(ecode);
-       eprog.insns = (uint64_t)ecode;
-       eprog.license = (uint64_t)bpf_license;
+       eprog.insns = (unsigned long) &ecode;
+       eprog.license = (unsigned long) &bpf_license;
        eprog.kern_version = 0;
 
        memset(&cprog, 0, sizeof(cprog));
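The cast changes in this selftest target 32-bit builds: the attr fields being assigned are 64-bit, and casting a pointer straight to uint64_t draws a cast-from-pointer-to-integer-of-different-size warning on 32-bit userland. Going through unsigned long is exact at either word size. A reduced sketch (the union layout here is an assumption for illustration, not the real bpf_attr):

	#include <stdint.h>

	union bpf_attr_sketch {
		uint64_t insns;		/* the kernel reads this as a user pointer */
	};

	static void set_insns(union bpf_attr_sketch *attr, const void *prog)
	{
		/* pointer -> unsigned long is lossless on both 32- and 64-bit
		 * ABIs, and the integer then widens cleanly into the u64 field */
		attr->insns = (unsigned long)prog;
	}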