Merge tag 'perf-core-for-mingo-4.12-20170503' of git://git.kernel.org/pub/scm/linux...
author Ingo Molnar <mingo@kernel.org>
Wed, 3 May 2017 17:28:27 +0000 (19:28 +0200)
committer Ingo Molnar <mingo@kernel.org>
Wed, 3 May 2017 17:28:27 +0000 (19:28 +0200)
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

Fixes:

- Support setting probes on versioned user space symbols, such as
  pthread_create@@GLIBC_2.1, picking the default version (see the
  sketch below); more work is needed before probes can be set on the
  other versions, as the 'perf probe' syntax already uses @ for other
  purposes (Paul Clarke)
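  As background, '@@' marks the ELF default version of a symbol, while
  a single '@' marks the older versions. A minimal sketch of how a
  library exports both, with illustrative names (a matching linker
  version script defining VERS_1/VERS_2 is assumed at link time):

	/* foo@@VERS_2 is the default: a plain 'foo' lookup picks it;
	 * foo@VERS_1 stays reachable only by its versioned name. */
	int foo_old(void) { return 1; }
	int foo_new(void) { return 2; }

	__asm__(".symver foo_old, foo@VERS_1");
	__asm__(".symver foo_new, foo@@VERS_2");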

- Do not special-case address zero as an error in routines that
  return addresses (symbol lookup); instead, use the return value as
  the success/error indication and pass a pointer to return the
  address (sketched below). This fixes 'perf test vmlinux' (the one
  that compares addresses between vmlinux and kallsyms) on s390,
  where the '_text' address is equal to zero (Arnaldo Carvalho de
  Melo)
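  The shape of that fix, as a sketch with hypothetical names (not the
  actual perf API): the return value carries the status, so address
  zero becomes a legal lookup result:

	#include <stdio.h>

	/* status via the return value, address via an out-pointer */
	static int symbol_addr(const char *name, unsigned long *addr)
	{
		if (!name)
			return -1;	/* lookup failed */
		*addr = 0;		/* 0 is now a valid address */
		return 0;
	}

	int main(void)
	{
		unsigned long addr;

		if (symbol_addr("_text", &addr) == 0)	/* check status, not addr */
			printf("_text is at %#lx\n", addr);
		return 0;
	}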

Infrastructure changes:

- More header sanitization, moving declarations out of util.h into
  more appropriate headers and objects, sometimes creating new ones
  (Arnaldo Carvalho de Melo)

- Refactor duplicated code for obtaining the config file name, as
  sketched below (Taeung Song)
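  A sketch of that refactor's shape, with a hypothetical helper name
  (the real one lives in perf's config code); both former call sites
  now ask a single helper for the path:

	#include <stdlib.h>

	static const char *perf_config_filename(void)
	{
		const char *p = getenv("PERF_CONFIG");	/* user override, if set */

		return p ? p : "~/.perfconfig";		/* per-user default */
	}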

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
364 files changed:
.mailmap
Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/atomic.h
arch/arc/include/asm/entry-arcv2.h
arch/arc/include/asm/ptrace.h
arch/arc/kernel/setup.c
arch/arm/boot/dts/am335x-baltos.dtsi
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/sun8i-a33.dtsi
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/omap-hotplug.c
arch/arm/mach-omap2/omap-mpuss-lowpower.c
arch/arm/mach-omap2/omap-smc.S
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-orion5x/Kconfig
arch/arm/plat-orion/common.c
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
arch/ia64/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/ia64/lib/Makefile
arch/mips/Makefile
arch/mips/include/asm/asm-prototypes.h
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/elf.c
arch/mips/kernel/kgdb.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/relocate.c
arch/mips/kernel/smp-cps.c
arch/mips/mti-malta/malta-int.c
arch/mips/pci/pci-legacy.c
arch/parisc/include/asm/uaccess.h
arch/parisc/lib/lusercopy.S
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/s390/include/asm/pgtable.h
arch/sparc/Kconfig
arch/sparc/include/asm/ptrace.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/ptrace_64.c
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/mm/hugetlbpage.c
arch/x86/Makefile
arch/x86/entry/vdso/vdso32-setup.c
arch/x86/events/intel/rapl.c
arch/x86/include/asm/elf.h
arch/x86/include/asm/pmem.h
arch/x86/kernel/cpu/intel_rdt_schemata.c
arch/x86/kernel/cpu/mcheck/mce-genpool.c
arch/x86/kernel/cpu/mcheck/mce-internal.h
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/signal.c
arch/x86/kernel/signal_compat.c
arch/x86/kernel/traps.c
arch/x86/lib/delay.c
arch/x86/mm/init.c
arch/x86/platform/efi/quirks.c
block/blk-mq.c
block/elevator.c
crypto/ahash.c
crypto/algif_aead.c
crypto/lrw.c
crypto/xts.c
drivers/acpi/acpica/utresrc.c
drivers/acpi/nfit/core.c
drivers/acpi/power.c
drivers/acpi/scan.c
drivers/ata/pata_atiixp.c
drivers/ata/sata_via.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/zram/zram_drv.c
drivers/char/mem.c
drivers/char/virtio_console.c
drivers/clk/clk-stm32f4.c
drivers/clk/sunxi-ng/Kconfig
drivers/clk/sunxi-ng/ccu-sun8i-a33.c
drivers/clk/sunxi-ng/ccu_common.c
drivers/clk/sunxi-ng/ccu_common.h
drivers/cpufreq/cpufreq.c
drivers/dax/Kconfig
drivers/dax/dax.c
drivers/firmware/efi/libstub/gop.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
drivers/gpu/drm/udl/udl_transfer.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-uclogic.c
drivers/hid/wacom_wac.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/input/joystick/xpad.c
drivers/input/mouse/elantech.c
drivers/input/serio/i8042-x86ia64io.h
drivers/irqchip/irq-imx-gpcv2.c
drivers/mmc/core/sdio_bus.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mtd/ubi/upd.c
drivers/net/bonding/bond_main.c
drivers/net/can/ifi_canfd/ifi_canfd.c
drivers/net/can/rcar/rcar_can.c
drivers/net/can/usb/Kconfig
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_core.h
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/b53/b53_regs.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/ethernet/sfc/workarounds.h
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/phy/dp83640.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/team/team.c
drivers/net/usb/Kconfig
drivers/net/usb/ch9200.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/hso.c
drivers/net/usb/kaweth.c
drivers/net/usb/lan78xx.c
drivers/net/usb/plusb.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/sr9700.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/nvdimm/bus.c
drivers/nvdimm/claim.c
drivers/nvdimm/dimm_devs.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/loop.c
drivers/pci/dwc/pcie-hisi.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-exynos.h
drivers/pwm/pwm-lpss-pci.c
drivers/pwm/pwm-lpss-platform.c
drivers/pwm/pwm-lpss.c
drivers/pwm/pwm-lpss.h
drivers/pwm/pwm-rockchip.c
drivers/reset/core.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/ipr.c
drivers/scsi/qedf/qedf_fip.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sr.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/tty/tty_ldisc.c
drivers/usb/gadget/function/f_tcm.c
drivers/video/backlight/pwm_bl.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/xen-fbfront.c
drivers/virtio/virtio.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c
fs/btrfs/inode.c
fs/btrfs/qgroup.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/ceph/inode.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/hugetlbfs/inode.c
fs/namei.c
fs/nfs/direct.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfssvc.c
fs/nfsd/nfsxdr.c
fs/nsfs.c
fs/orangefs/devorangefs-req.c
fs/orangefs/orangefs-bufmap.c
fs/orangefs/orangefs-kernel.h
fs/orangefs/super.c
fs/proc/task_mmu.c
fs/stat.c
fs/ubifs/debug.c
fs/ubifs/dir.c
include/crypto/internal/hash.h
include/linux/blkdev.h
include/linux/cgroup.h
include/linux/mmc/sdio_func.h
include/linux/mmu_notifier.h
include/linux/phy.h
include/linux/reset.h
include/linux/sched.h
include/linux/uio.h
include/linux/virtio.h
include/target/target_core_base.h
include/uapi/linux/Kbuild
include/uapi/linux/ipv6_route.h
include/uapi/linux/stat.h
include/uapi/linux/virtio_pci.h
kernel/audit.c
kernel/bpf/core.c
kernel/bpf/syscall.c
kernel/cgroup/cgroup.c
kernel/irq/affinity.c
kernel/kthread.c
kernel/locking/lockdep_internals.h
kernel/sched/cputime.c
kernel/sched/sched.h
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
lib/Kconfig.debug
lib/iov_iter.c
mm/huge_memory.c
mm/migrate.c
mm/page_alloc.c
mm/vmstat.c
mm/z3fold.c
mm/zsmalloc.c
net/9p/client.c
net/bridge/br_device.c
net/bridge/br_if.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/core/datagram.c
net/core/dev.c
net/core/netpoll.c
net/core/skbuff.c
net/ipv4/af_inet.c
net/ipv4/ip_sockglue.c
net/ipv4/ipmr.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/ip6_input.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ndisc.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/seg6.c
net/key/af_key.c
net/l2tp/l2tp_ppp.c
net/mac80211/rx.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_nat_redirect.c
net/netfilter/nft_hash.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TPROXY.c
net/packet/af_packet.c
net/qrtr/qrtr.c
net/sched/act_api.c
net/sched/sch_generic.c
net/sctp/socket.c
net/tipc/socket.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
security/keys/gc.c
security/keys/keyctl.c
security/keys/process_keys.c
sound/core/seq/seq_lock.c
sound/firewire/lib.h
sound/firewire/oxfw/oxfw.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/boards/bytcr_rt5651.c
sound/soc/soc-topology.c
sound/soc/sti/uniperif.h
sound/soc/sti/uniperif_player.c
sound/soc/sti/uniperif_reader.c
tools/power/cpupower/utils/helpers/cpuid.c
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc [new file with mode: 0644]
tools/testing/selftests/net/psock_fanout.c
tools/testing/selftests/net/psock_lib.h

index e229922dc7f0a30cefab74d0f75313af513965e1..1d6f4e7280dc67f630b79456b3f1e5a4c0a41343 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Mark Brown <broonie@sirena.org.uk>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Matthieu CASTET <castet.matthieu@free.fr>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
index b7fa3b97986d5fff391bf4ff1a01bdd25ce096e9..a339dbb154933282ee06aaeec0571e0d95a72b42 100644 (file)
@@ -44,13 +44,19 @@ Hip05 Example (note that Hip06 is the same except compatible):
        };
 
 HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description.
+
+Some BIOSes place the host controller in a mode where it is ECAM
+compliant for all devices other than the root complex. In such cases,
+the host controller should be described as below.
+
 The properties and their meanings are identical to those described in
 host-generic-pci.txt except as listed below.
 
 Properties of the host controller node that differ from
 host-generic-pci.txt:
 
-- compatible     : Must be "hisilicon,pcie-almost-ecam"
+- compatible     : Must be "hisilicon,hip06-pcie-ecam", or
+                  "hisilicon,hip07-pcie-ecam"
 
 - reg            : Two entries: First the ECAM configuration space for any
                   other bus underneath the root bus. Second, the base
@@ -59,7 +65,7 @@ host-generic-pci.txt:
 
 Example:
        pcie0: pcie@a0090000 {
-               compatible = "hisilicon,pcie-almost-ecam";
+               compatible = "hisilicon,hip06-pcie-ecam";
                reg = <0 0xb0000000 0 0x2000000>,  /*  ECAM configuration space */
                      <0 0xa0090000 0 0x10000>; /* host bridge registers */
                bus-range = <0  31>;
index fdd5350fe261f27a99d4a5c43c1e8a99eb498e26..38d3e4ed7208bb3969abcbfc76c4e68fd90d12f5 100644 (file)
@@ -2585,12 +2585,26 @@ F:      include/uapi/linux/if_bonding.h
 
 BPF (Safe dynamic programs and tools)
 M:     Alexei Starovoitov <ast@kernel.org>
+M:     Daniel Borkmann <daniel@iogearbox.net>
 L:     netdev@vger.kernel.org
 L:     linux-kernel@vger.kernel.org
 S:     Supported
+F:     arch/x86/net/bpf_jit*
+F:     Documentation/networking/filter.txt
+F:     include/linux/bpf*
+F:     include/linux/filter.h
+F:     include/uapi/linux/bpf*
+F:     include/uapi/linux/filter.h
 F:     kernel/bpf/
-F:     tools/testing/selftests/bpf/
+F:     kernel/trace/bpf_trace.c
 F:     lib/test_bpf.c
+F:     net/bpf/
+F:     net/core/filter.c
+F:     net/sched/act_bpf.c
+F:     net/sched/cls_bpf.c
+F:     samples/bpf/
+F:     tools/net/bpf*
+F:     tools/testing/selftests/bpf/
 
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:     Michael Chan <michael.chan@broadcom.com>
@@ -8761,6 +8775,7 @@ W:        http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+B:     mailto:netdev@vger.kernel.org
 S:     Maintained
 F:     net/
 F:     include/net/
@@ -12464,7 +12479,6 @@ F:      drivers/clk/ti/
 F:     include/linux/clk/ti.h
 
 TI ETHERNET SWITCH DRIVER (CPSW)
-M:     Mugunthan V N <mugunthanvnm@ti.com>
 R:     Grygorii Strashko <grygorii.strashko@ti.com>
 L:     linux-omap@vger.kernel.org
 L:     netdev@vger.kernel.org
@@ -13305,7 +13319,7 @@ F:      drivers/virtio/
 F:     tools/virtio/
 F:     drivers/net/virtio_net.c
 F:     drivers/block/virtio_blk.c
-F:     include/linux/virtio_*.h
+F:     include/linux/virtio*.h
 F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
 
index efa267a92ba695b00090ad72e9599cfbb9af3b98..4b074a904106fa91e75d52c6ea459a2b084acc34 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index c9f30f4763abce5ca1ffda1817e87bb5de410861..5d7fb3e7cb97159012d49e6cee745ada293949a6 100644 (file)
@@ -406,6 +406,14 @@ config ARC_HAS_DIV_REM
        bool "Insn: div, divu, rem, remu"
        default y
 
+config ARC_HAS_ACCL_REGS
+       bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
+       default n
+       help
+         Depending on the configuration, CPU can contain accumulator reg-pair
+         (also referred to as r58:r59). These can also be used by gcc as GPR so
+         kernel needs to save/restore per process
+
 endif  # ISA_ARCV2
 
 endmenu   # "ARC CPU Configuration"
index b65930a4958959fb65891ac375a7637a0e5cb146..54b54da6384c197a93848e05ef292c224f5c0f91 100644 (file)
 #include <asm/barrier.h>
 #include <asm/smp.h>
 
+#define ATOMIC_INIT(i) { (i) }
+
 #ifndef CONFIG_ARC_PLAT_EZNPS
 
 #define atomic_read(v)  READ_ONCE((v)->counter)
-#define ATOMIC_INIT(i) { (i) }
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
index aee1a77934cf694e37ae579347a37bc167e43762..ac85380d14a4bb364bb9077e8c8b329f862845b8 100644 (file)
        ;
        ; Now manually save: r12, sp, fp, gp, r25
 
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       PUSH    r59
+       PUSH    r58
+#endif
+
        PUSH    r30
        PUSH    r12
 
        POP     r12
        POP     r30
 
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       POP     r58
+       POP     r59
+#endif
+
 .endm
 
 /*------------------------------------------------------------------------*/
index 47111d565a959d117ab9e2c7c9eea3b852137971..5297faa8a37803fd702da4270a7e23f5d6ba4f2b 100644 (file)
@@ -86,6 +86,10 @@ struct pt_regs {
 
        unsigned long r12, r30;
 
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */
+#endif
+
        /*------- Below list auto saved by h/w -----------*/
        unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
 
index fa62404ba58f77ab9fc24489b35c15132d03d720..fc8211f338ad33cab4036a09693cb77e1ea62a98 100644 (file)
@@ -319,7 +319,8 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 static void arc_chk_core_config(void)
 {
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
-       int fpu_enabled;
+       int saved = 0, present = 0;
+       char *opt_nm = NULL;
 
        if (!cpu->extn.timer0)
                panic("Timer0 is not present!\n");
@@ -346,17 +347,28 @@ static void arc_chk_core_config(void)
 
        /*
         * FP hardware/software config sanity
-        * -If hardware contains DPFP, kernel needs to save/restore FPU state
+        * -If hardware present, kernel needs to save/restore FPU state
         * -If not, it will crash trying to save/restore the non-existent regs
-        *
-        * (only DPDP checked since SP has no arch visible regs)
         */
-       fpu_enabled = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);
 
-       if (cpu->extn.fpu_dp && !fpu_enabled)
-               pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
-       else if (!cpu->extn.fpu_dp && fpu_enabled)
-               panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
+       if (is_isa_arcompact()) {
+               opt_nm = "CONFIG_ARC_FPU_SAVE_RESTORE";
+               saved = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);
+
+               /* only DPDP checked since SP has no arch visible regs */
+               present = cpu->extn.fpu_dp;
+       } else {
+               opt_nm = "CONFIG_ARC_HAS_ACCL_REGS";
+               saved = IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS);
+
+               /* Accumulator Low:High pair (r58:59) present if DSP MPY or FPU */
+               present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
+       }
+
+       if (present && !saved)
+               pr_warn("Enable %s for working apps\n", opt_nm);
+       else if (!present && saved)
+               panic("Disable %s, hardware NOT present\n", opt_nm);
 }
 
 /*
index efb5eae290a8b7437b95206fa9f69ae4ccecab44..d42b98f15e8b97aaa5956047b51bd1d4a8d610ed 100644 (file)
 
        phy1: ethernet-phy@1 {
                reg = <7>;
+               eee-broken-100tx;
+               eee-broken-1000t;
        };
 };
 
index 9e43c443738a8acedd2c0e160a5b6ddc9e0f6c39..9ba4b18c0cb21711dcd8ef60648a0916914819e7 100644 (file)
        ti,non-removable;
        bus-width = <4>;
        cap-power-off-card;
+       keep-power-in-suspend;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc2_pins>;
 
index 2c9e56f4aac53aa74c206b2cbb3622965d7ab792..bbfb9d5a70a98116d303844a91c6a65dd42cfb39 100644 (file)
                                device_type = "pci";
                                ranges = <0x81000000 0 0          0x03000 0 0x00010000
                                          0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+                               bus-range = <0x00 0xff>;
                                #interrupt-cells = <1>;
                                num-lanes = <1>;
                                linux,pci-domain = <0>;
                                device_type = "pci";
                                ranges = <0x81000000 0 0          0x03000 0 0x00010000
                                          0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+                               bus-range = <0x00 0xff>;
                                #interrupt-cells = <1>;
                                num-lanes = <1>;
                                linux,pci-domain = <1>;
index 8f9a69ca818cecb759e71c1b6b97e4073c3e22e4..efe53998c961244fc0cd1ff32a5e53c885b6322f 100644 (file)
 &i2c3 {
        clock-frequency = <400000>;
        at24@50 {
-               compatible = "at24,24c02";
+               compatible = "atmel,24c64";
                readonly;
                reg = <0x50>;
        };
index 0467fb365bfca714b5ce9021974470002139039f..306af6cadf26033c6102b61c6a6d7693faba30fe 100644 (file)
                        opp-microvolt = <1200000>;
                        clock-latency-ns = <244144>; /* 8 32k periods */
                };
-
-               opp@1200000000 {
-                       opp-hz = /bits/ 64 <1200000000>;
-                       opp-microvolt = <1320000>;
-                       clock-latency-ns = <244144>; /* 8 32k periods */
-               };
        };
 
        cpus {
                        operating-points-v2 = <&cpu0_opp_table>;
                };
 
+               cpu@1 {
+                       operating-points-v2 = <&cpu0_opp_table>;
+               };
+
                cpu@2 {
                        compatible = "arm,cortex-a7";
                        device_type = "cpu";
                        reg = <2>;
+                       operating-points-v2 = <&cpu0_opp_table>;
                };
 
                cpu@3 {
                        compatible = "arm,cortex-a7";
                        device_type = "cpu";
                        reg = <3>;
+                       operating-points-v2 = <&cpu0_opp_table>;
                };
        };
 
index c4f2ace91ea22d1a6a344388498ec8da30906e63..3089d3bfa19b4a5c595e6872824c81f103d4c5e2 100644 (file)
@@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
 extern int omap4_mpuss_init(void);
 extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
                                        unsigned int power_state)
index d3fb5661bb5d4bc098b05e36f12278301b4c5597..433db6d0b07396288f3b1e38c57d0a64b2ad1e66 100644 (file)
@@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
                omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
 
                if (omap_secure_apis_support())
-                       boot_cpu = omap_read_auxcoreboot0();
+                       boot_cpu = omap_read_auxcoreboot0() >> 9;
                else
                        boot_cpu =
                                readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
index 113ab2dd2ee91ccf9c238813bd6c4d7561ff7d97..03ec6d307c8235fc907a599322991b1efd6c27bf 100644 (file)
@@ -64,6 +64,7 @@
 #include "prm-regbits-44xx.h"
 
 static void __iomem *sar_base;
+static u32 old_cpu1_ns_pa_addr;
 
 #if defined(CONFIG_PM) && defined(CONFIG_SMP)
 
@@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
 {}
 #endif
 
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+       return old_cpu1_ns_pa_addr;
+}
+
 /**
  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
  * The purpose of this function is to manage low power programming
@@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
 void __init omap4_mpuss_early_init(void)
 {
        unsigned long startup_pa;
+       void __iomem *ns_pa_addr;
 
-       if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
+       if (!(soc_is_omap44xx() || soc_is_omap54xx()))
                return;
 
        sar_base = omap4_get_sar_ram_base();
 
-       if (cpu_is_omap443x())
+       /* Save old NS_PA_ADDR for validity checks later on */
+       if (soc_is_omap44xx())
+               ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+       else
+               ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+       old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
+
+       if (soc_is_omap443x())
                startup_pa = __pa_symbol(omap4_secondary_startup);
-       else if (cpu_is_omap446x())
+       else if (soc_is_omap446x())
                startup_pa = __pa_symbol(omap4460_secondary_startup);
        else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
                startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
        else
                startup_pa = __pa_symbol(omap5_secondary_startup);
 
-       if (cpu_is_omap44xx())
+       if (soc_is_omap44xx())
                writel_relaxed(startup_pa, sar_base +
                               CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
        else
index fd90125bffc70ad6719bfc2b9f3e22d21b595367..72506e6cf9e7423f946d83e61b5aca66d2fee0c0 100644 (file)
@@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
        ldr     r12, =0x103
        dsb
        smc     #0
-       mov     r0, r0, lsr #9
        ldmfd   sp!, {r2-r12, pc}
 ENDPROC(omap_read_auxcoreboot0)
index 003353b0b7944d9363fb6446e683314823cd82f9..3faf454ba4871c8f60d5e12e912a1ff9dd9af271 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/irqchip/arm-gic.h>
 
+#include <asm/sections.h>
 #include <asm/smp_scu.h>
 #include <asm/virt.h>
 
 
 #define OMAP5_CORE_COUNT       0x2
 
+#define AUX_CORE_BOOT0_GP_RELEASE      0x020
+#define AUX_CORE_BOOT0_HS_RELEASE      0x200
+
 struct omap_smp_config {
        unsigned long cpu1_rstctrl_pa;
        void __iomem *cpu1_rstctrl_va;
        void __iomem *scu_base;
+       void __iomem *wakeupgen_base;
        void *startup_addr;
 };
 
@@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
        static struct clockdomain *cpu1_clkdm;
        static bool booted;
        static struct powerdomain *cpu1_pwrdm;
-       void __iomem *base = omap_get_wakeupgen_base();
 
        /*
         * Set synchronisation state between this boot processor
@@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
         * A barrier is added to ensure that write buffer is drained
         */
        if (omap_secure_apis_support())
-               omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+               omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
+                                        0xfffffdff);
        else
-               writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
+               writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
+                              cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
 
        if (!cpu1_clkdm && !cpu1_pwrdm) {
                cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
                set_cpu_possible(i, true);
 }
 
+/*
+ * For now, just make sure the start-up address is not within the booting
+ * kernel space as that means we just overwrote whatever secondary_startup()
+ * code there was.
+ */
+static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
+{
+       if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
+               return false;
+
+       return true;
+}
+
+/*
+ * We may need to reset CPU1 before configuring, otherwise kexec boot can end
+ * up trying to use old kernel startup address or suspend-resume will
+ * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
+ * idle states.
+ */
+static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
+{
+       unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
+       bool needs_reset = false;
+       u32 released;
+
+       if (omap_secure_apis_support())
+               released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
+       else
+               released = readl_relaxed(cfg.wakeupgen_base +
+                                        OMAP_AUX_CORE_BOOT_0) &
+                                               AUX_CORE_BOOT0_GP_RELEASE;
+       if (released) {
+               pr_warn("smp: CPU1 not parked?\n");
+
+               return;
+       }
+
+       cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
+                                       OMAP_AUX_CORE_BOOT_1);
+       cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+
+       /* Did the configured secondary_startup() get overwritten? */
+       if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
+               needs_reset = true;
+
+       /*
+        * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
+        * deeper idle state in WFI and will wake to an invalid address.
+        */
+       if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
+           !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+               needs_reset = true;
+
+       if (!needs_reset || !c->cpu1_rstctrl_va)
+               return;
+
+       pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
+               cpu1_startup_pa, cpu1_ns_pa_addr);
+
+       writel_relaxed(1, c->cpu1_rstctrl_va);
+       readl_relaxed(c->cpu1_rstctrl_va);
+       writel_relaxed(0, c->cpu1_rstctrl_va);
+}
+
 static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 {
-       void __iomem *base = omap_get_wakeupgen_base();
        const struct omap_smp_config *c = NULL;
 
        if (soc_is_omap443x())
@@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
        /* Must preserve cfg.scu_base set earlier */
        cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
        cfg.startup_addr = c->startup_addr;
+       cfg.wakeupgen_base = omap_get_wakeupgen_base();
 
        if (soc_is_dra74x() || soc_is_omap54xx()) {
                if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
        if (cfg.scu_base)
                scu_enable(cfg.scu_base);
 
-       /*
-        * Reset CPU1 before configuring, otherwise kexec will
-        * end up trying to use old kernel startup address.
-        */
-       if (cfg.cpu1_rstctrl_va) {
-               writel_relaxed(1, cfg.cpu1_rstctrl_va);
-               readl_relaxed(cfg.cpu1_rstctrl_va);
-               writel_relaxed(0, cfg.cpu1_rstctrl_va);
-       }
+       omap4_smp_maybe_reset_cpu1(&cfg);
 
        /*
         * Write the address of secondary startup routine into the
@@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
                omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
        else
                writel_relaxed(__pa_symbol(cfg.startup_addr),
-                              base + OMAP_AUX_CORE_BOOT_1);
+                              cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
 }
 
 const struct smp_operations omap4_smp_ops __initconst = {
index e920dd83e443753ccced325ce19c48c6bca398c6..f989145480c8fcd0c947beaadeefe6955896a434 100644 (file)
@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
                                dev_err(dev, "failed to idle\n");
                }
                break;
+       case BUS_NOTIFY_BIND_DRIVER:
+               od = to_omap_device(pdev);
+               if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+                   pm_runtime_status_suspended(dev)) {
+                       od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+                       pm_runtime_set_active(dev);
+               }
+               break;
        case BUS_NOTIFY_ADD_DEVICE:
                if (pdev->dev.of_node)
                        omap_device_build_from_dt(pdev);
index 633442ad4e4c16d4c80564410167b53977e7575f..2a7bb6ccdcb7eb219f515c6e0f1ba2bfe573a349 100644 (file)
@@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
        select GPIOLIB
        select MVEBU_MBUS
        select PCI
+       select PHYLIB if NETDEVICES
        select PLAT_ORION_LEGACY
        help
          Support for the following Marvell Orion 5x series SoCs:
index 9255b6d67ba5e3a3b3586639cadc86342e8b4b04..aff6994950ba6db7eb6579a90cc94e5b2bfc7329 100644 (file)
@@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
                    eth_data, &orion_ge11);
 }
 
+#ifdef CONFIG_ARCH_ORION5X
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
@@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
        struct mdio_board_info *bd;
        unsigned int i;
 
+       if (!IS_BUILTIN(CONFIG_PHYLIB))
+               return;
+
        for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
                if (!strcmp(d->port_names[i], "cpu"))
                        break;
@@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 
        mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
+#endif
 
 /*****************************************************************************
  * I2C
index 1c64ea2d23f96a4a990f9f6e8cd4b22fd3fd3dd3..0565779e66fafd9755048c2a1c7f8ebc15533eff 100644 (file)
                usbphy: phy@01c19400 {
                        compatible = "allwinner,sun50i-a64-usb-phy";
                        reg = <0x01c19400 0x14>,
+                             <0x01c1a800 0x4>,
                              <0x01c1b800 0x4>;
                        reg-names = "phy_ctrl",
+                                   "pmu0",
                                    "pmu1";
                        clocks = <&ccu CLK_USB_PHY0>,
                                 <&ccu CLK_USB_PHY1>;
diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h
new file mode 100644 (file)
index 0000000..a2c1398
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_ASM_PROTOTYPES_H
+#define _ASM_IA64_ASM_PROTOTYPES_H
+
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/esi.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+#include <asm/unwind.h>
+#include <asm/xor.h>
+
+extern const char ia64_ivt[];
+
+signed int __divsi3(signed int, unsigned int);
+signed int __modsi3(signed int, unsigned int);
+
+signed long long __divdi3(signed long long, unsigned long long);
+signed long long __moddi3(signed long long, unsigned long long);
+
+unsigned int __udivsi3(unsigned int, unsigned int);
+unsigned int __umodsi3(unsigned int, unsigned int);
+
+unsigned long long __udivdi3(unsigned long long, unsigned long long);
+unsigned long long __umoddi3(unsigned long long, unsigned long long);
+
+#endif /* _ASM_IA64_ASM_PROTOTYPES_H */
index 1f3d3877618fdc934ab20f07695476206fe35e00..0a40b14407b1692c7a684bb2ea689d130df36805 100644 (file)
@@ -24,25 +24,25 @@ AFLAGS___modsi3.o   =            -DMODULO
 AFLAGS___umodsi3.o     = -DUNSIGNED -DMODULO
 
 $(obj)/__divdi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__moddi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__divsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__modsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
index 8ef9c02747fa95a753ea79b77ed2a177a60ac6ad..02a1787c888c09b0d36f0b9ea6bea0988c3134c2 100644 (file)
@@ -489,7 +489,7 @@ $(generic_defconfigs):
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
                -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ \
                $(foreach board,$(BOARDS),$(generic_config_dir)/board-$(board).config)
-       $(Q)$(MAKE) olddefconfig
+       $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
 
 #
 # Prevent generic merge_config rules attempting to merge single fragments
@@ -503,8 +503,8 @@ $(generic_config_dir)/%.config: ;
 #
 .PHONY: sead3_defconfig
 sead3_defconfig:
-       $(Q)$(MAKE) 32r2el_defconfig BOARDS=sead-3
+       $(Q)$(MAKE) -f $(srctree)/Makefile 32r2el_defconfig BOARDS=sead-3
 
 .PHONY: sead3micro_defconfig
 sead3micro_defconfig:
-       $(Q)$(MAKE) micro32r2el_defconfig BOARDS=sead-3
+       $(Q)$(MAKE) -f $(srctree)/Makefile micro32r2el_defconfig BOARDS=sead-3
index a160cf69bb92d3ec35ec20236015b8405f29e0a3..6e28971fe73ad7e8ec0ab07c5e6e87b68d76f857 100644 (file)
@@ -3,3 +3,4 @@
 #include <asm/fpu.h>
 #include <asm-generic/asm-prototypes.h>
 #include <asm/uaccess.h>
+#include <asm/ftrace.h>
index 804d2a2a19fe03175aa6ad6fa56f91238591f493..dd6a18bc10abd0c34a6717fe12bb7169a4589231 100644 (file)
@@ -80,7 +80,7 @@ static unsigned int calculate_min_delta(void)
                }
 
                /* Sorted insert of 75th percentile into buf2 */
-               for (k = 0; k < i; ++k) {
+               for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
                        if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
                                l = min_t(unsigned int,
                                          i, ARRAY_SIZE(buf2) - 1);
index 6430bff21fff80a7bb6647c6af0198364cac7bdd..5c429d70e17f6f24cbcfd0fa912c67eccd11f28f 100644 (file)
@@ -257,7 +257,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
        else if ((prog_req.fr1 && prog_req.frdefault) ||
                 (prog_req.single && !prog_req.frdefault))
                /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
-               state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+               state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
                                          cpu_has_mips_r2_r6) ?
                                          FP_FR1 : FP_FR0;
        else if (prog_req.fr1)
index 1f4bd222ba765788fe8e9c1f14e382c59b60df62..eb6c0d582626b114fcb8d30f9fb28ee3472e8cc9 100644 (file)
@@ -244,9 +244,6 @@ static int compute_signal(int tt)
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 {
        int reg;
-       struct thread_info *ti = task_thread_info(p);
-       unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
-       struct pt_regs *regs = (struct pt_regs *)ksp - 1;
 #if (KGDB_GDB_REG_SIZE == 32)
        u32 *ptr = (u32 *)gdb_regs;
 #else
@@ -254,25 +251,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 #endif
 
        for (reg = 0; reg < 16; reg++)
-               *(ptr++) = regs->regs[reg];
+               *(ptr++) = 0;
 
        /* S0 - S7 */
-       for (reg = 16; reg < 24; reg++)
-               *(ptr++) = regs->regs[reg];
+       *(ptr++) = p->thread.reg16;
+       *(ptr++) = p->thread.reg17;
+       *(ptr++) = p->thread.reg18;
+       *(ptr++) = p->thread.reg19;
+       *(ptr++) = p->thread.reg20;
+       *(ptr++) = p->thread.reg21;
+       *(ptr++) = p->thread.reg22;
+       *(ptr++) = p->thread.reg23;
 
        for (reg = 24; reg < 28; reg++)
                *(ptr++) = 0;
 
        /* GP, SP, FP, RA */
-       for (reg = 28; reg < 32; reg++)
-               *(ptr++) = regs->regs[reg];
-
-       *(ptr++) = regs->cp0_status;
-       *(ptr++) = regs->lo;
-       *(ptr++) = regs->hi;
-       *(ptr++) = regs->cp0_badvaddr;
-       *(ptr++) = regs->cp0_cause;
-       *(ptr++) = regs->cp0_epc;
+       *(ptr++) = (long)p;
+       *(ptr++) = p->thread.reg29;
+       *(ptr++) = p->thread.reg30;
+       *(ptr++) = p->thread.reg31;
+
+       *(ptr++) = p->thread.cp0_status;
+
+       /* lo, hi */
+       *(ptr++) = 0;
+       *(ptr++) = 0;
+
+       /*
+        * BadVAddr, Cause
+        * Ideally these would come from the last exception frame up the stack
+        * but that requires unwinding; otherwise we can't know much for sure.
+        */
+       *(ptr++) = 0;
+       *(ptr++) = 0;
+
+       /*
+        * PC
+        * use return address (RA), i.e. the moment after return from resume()
+        */
+       *(ptr++) = p->thread.reg31;
 }
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
index 8c35b3152e1eb35cab7f8f605fe48163310b5024..9452b02ce0797e7ac890d1785f50907d9e747b5d 100644 (file)
@@ -1446,6 +1446,11 @@ static int mipsxx_pmu_handle_shared_irq(void)
        HANDLE_COUNTER(0)
        }
 
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+       read_unlock(&pmuint_rwlock);
+#endif
+       resume_local_counters();
+
        /*
         * Do all the work for the pending perf events. We can do this
         * in here because the performance counter interrupt is a regular
@@ -1454,10 +1459,6 @@ static int mipsxx_pmu_handle_shared_irq(void)
        if (handled == IRQ_HANDLED)
                irq_work_run();
 
-#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
-       read_unlock(&pmuint_rwlock);
-#endif
-       resume_local_counters();
        return handled;
 }
 
index 9103bebc9a8eef76e3d524b63b74aa91936f3663..2d1a0c4387713c7862e06bbf79e8c904a07b2279 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/libfdt.h>
 #include <linux/of_fdt.h>
-#include <linux/sched.h>
+#include <linux/sched/task.h>
 #include <linux/start_kernel.h>
 #include <linux/string.h>
 #include <linux/printk.h>
index 6d45f05538c8b37db5fff707d50daa42ef8b097a..795b4aaf89277be344e2e3e512a3642fb3085f5a 100644 (file)
@@ -422,13 +422,12 @@ void play_dead(void)
        local_irq_disable();
        idle_task_exit();
        cpu = smp_processor_id();
+       core = cpu_data[cpu].core;
        cpu_death = CPU_DEATH_POWER;
 
        pr_debug("CPU%d going offline\n", cpu);
 
        if (cpu_has_mipsmt || cpu_has_vp) {
-               core = cpu_data[cpu].core;
-
                /* Look for another online VPE within the core */
                for_each_online_cpu(cpu_death_sibling) {
                        if (cpu_data[cpu_death_sibling].core != core)
index cb675ec6f283ee9d08071e9b112845bddd5c1594..54f56d5a96c46ec8c3b2b1a6e8bcdc06dbf5e02a 100644 (file)
@@ -232,6 +232,17 @@ void __init arch_init_irq(void)
 {
        int corehi_irq;
 
+       /*
+        * Preallocate the i8259's expected virq's here. Since irqchip_init()
+        * will probe the irqchips in hierarchical order, i8259 is probed last.
+        * If anything allocates a virq before the i8259 is probed, it will
+        * be given one of the i8259's expected range and consequently setup
+        * of the i8259 will fail.
+        */
+       WARN(irq_alloc_descs(I8259A_IRQ_BASE, I8259A_IRQ_BASE,
+                           16, numa_node_id()) < 0,
+               "Cannot reserve i8259 virqs at IRQ%d\n", I8259A_IRQ_BASE);
+
        i8259_set_poll(mips_pcibios_iack);
        irqchip_init();
 
index 014649be158d95f0cce86493ac7186b7ceec73d8..3a84f6c0c840569aa4f9db2d15b31c8079cfd39e 100644 (file)
@@ -190,7 +190,7 @@ void register_pci_controller(struct pci_controller *hose)
        }
 
        INIT_LIST_HEAD(&hose->list);
-       list_add(&hose->list, &controllers);
+       list_add_tail(&hose->list, &controllers);
 
        /*
         * Do not panic here but later - this might happen before console init.
index 8442727f28d2732eecb583d46bdcc88258a590dd..cbd4f4af8108bc8fa9ec36504589eac7107e6b1c 100644 (file)
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr)          __get_user_asm64(ptr)
+#define LDD_USER(val, ptr)     __get_user_asm64(val, ptr)
 #define STD_USER(x, ptr)       __put_user_asm64(x, ptr)
 #else
-#define LDD_USER(ptr)          __get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr)     __get_user_asm(val, "ldd", ptr)
 #define STD_USER(x, ptr)       __put_user_asm("std", x, ptr)
 #endif
 
@@ -97,63 +97,87 @@ struct exception_data {
                " mtsp %0,%%sr2\n\t"            \
                : : "r"(get_fs()) : )
 
-#define __get_user(x, ptr)                               \
-({                                                       \
-       register long __gu_err __asm__ ("r8") = 0;       \
-       register long __gu_val;                          \
-                                                        \
-       load_sr2();                                      \
-       switch (sizeof(*(ptr))) {                        \
-           case 1: __get_user_asm("ldb", ptr); break;   \
-           case 2: __get_user_asm("ldh", ptr); break;   \
-           case 4: __get_user_asm("ldw", ptr); break;   \
-           case 8: LDD_USER(ptr);  break;               \
-           default: BUILD_BUG(); break;                 \
-       }                                                \
-                                                        \
-       (x) = (__force __typeof__(*(ptr))) __gu_val;     \
-       __gu_err;                                        \
+#define __get_user_internal(val, ptr)                  \
+({                                                     \
+       register long __gu_err __asm__ ("r8") = 0;      \
+                                                       \
+       switch (sizeof(*(ptr))) {                       \
+       case 1: __get_user_asm(val, "ldb", ptr); break; \
+       case 2: __get_user_asm(val, "ldh", ptr); break; \
+       case 4: __get_user_asm(val, "ldw", ptr); break; \
+       case 8: LDD_USER(val, ptr); break;              \
+       default: BUILD_BUG();                           \
+       }                                               \
+                                                       \
+       __gu_err;                                       \
 })
 
-#define __get_user_asm(ldx, ptr)                        \
+#define __get_user(val, ptr)                           \
+({                                                     \
+       load_sr2();                                     \
+       __get_user_internal(val, ptr);                  \
+})
+
+#define __get_user_asm(val, ldx, ptr)                  \
+{                                                      \
+       register long __gu_val;                         \
+                                                       \
        __asm__("1: " ldx " 0(%%sr2,%2),%0\n"           \
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err));
+               : "r"(ptr), "1"(__gu_err));             \
+                                                       \
+       (val) = (__force __typeof__(*(ptr))) __gu_val;  \
+}
 
 #if !defined(CONFIG_64BIT)
 
-#define __get_user_asm64(ptr)                          \
+#define __get_user_asm64(val, ptr)                     \
+{                                                      \
+       union {                                         \
+               unsigned long long      l;              \
+               __typeof__(*(ptr))      t;              \
+       } __gu_tmp;                                     \
+                                                       \
        __asm__("   copy %%r0,%R0\n"                    \
                "1: ldw 0(%%sr2,%2),%0\n"               \
                "2: ldw 4(%%sr2,%2),%R0\n"              \
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
-               : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err));
+               : "=&r"(__gu_tmp.l), "=r"(__gu_err)     \
+               : "r"(ptr), "1"(__gu_err));             \
+                                                       \
+       (val) = __gu_tmp.t;                             \
+}
 
 #endif /* !defined(CONFIG_64BIT) */
 
 
-#define __put_user(x, ptr)                                      \
+#define __put_user_internal(x, ptr)                            \
 ({                                                             \
        register long __pu_err __asm__ ("r8") = 0;              \
         __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);      \
                                                                \
-       load_sr2();                                             \
        switch (sizeof(*(ptr))) {                               \
-           case 1: __put_user_asm("stb", __x, ptr); break;     \
-           case 2: __put_user_asm("sth", __x, ptr); break;     \
-           case 4: __put_user_asm("stw", __x, ptr); break;     \
-           case 8: STD_USER(__x, ptr); break;                  \
-           default: BUILD_BUG(); break;                        \
-       }                                                       \
+       case 1: __put_user_asm("stb", __x, ptr); break;         \
+       case 2: __put_user_asm("sth", __x, ptr); break;         \
+       case 4: __put_user_asm("stw", __x, ptr); break;         \
+       case 8: STD_USER(__x, ptr); break;                      \
+       default: BUILD_BUG();                                   \
+       }                                                       \
                                                                \
        __pu_err;                                               \
 })
 
+#define __put_user(x, ptr)                                     \
+({                                                             \
+       load_sr2();                                             \
+       __put_user_internal(x, ptr);                            \
+})
+
+
 /*
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
index f01188c044ee83e41ba52162544464781056f262..85c28bb80fb7433dfcfac2fb10f6cc121448119f 100644 (file)
@@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
        add     dst,len,end
 
        /* short copy with less than 16 bytes? */
-       cmpib,>>=,n 15,len,.Lbyte_loop
+       cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
        /* same alignment? */
        xor     src,dst,t0
@@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
        /* loop until we are 64-bit aligned */
 .Lalign_loop64:
        extru   dst,31,3,t1
-       cmpib,=,n       0,t1,.Lcopy_loop_16
+       cmpib,=,n       0,t1,.Lcopy_loop_16_start
 20:    ldb,ma  1(srcspc,src),t1
 21:    stb,ma  t1,1(dstspc,dst)
        b       .Lalign_loop64
@@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
        ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
+.Lcopy_loop_16_start:
        ldi     31,t0
 .Lcopy_loop_16:
        cmpb,COND(>>=),n t0,len,.Lword_loop
@@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
        /* loop until we are 32-bit aligned */
 .Lalign_loop32:
        extru   dst,31,2,t1
-       cmpib,=,n       0,t1,.Lcopy_loop_4
+       cmpib,=,n       0,t1,.Lcopy_loop_8
 20:    ldb,ma  1(srcspc,src),t1
 21:    stb,ma  t1,1(dstspc,dst)
        b       .Lalign_loop32
@@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
 
-.Lcopy_loop_4:
+.Lcopy_loop_8:
        cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
 10:    ldw     0(srcspc,src),t1
@@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
        ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
 
-       b       .Lcopy_loop_4
+       b       .Lcopy_loop_8
        ldo     -16(len),len
 
 .Lbyte_loop:
@@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
 .Lunaligned_copy:
        /* align until dst is 32bit-word-aligned */
        extru   dst,31,2,t1
-       cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
+       cmpib,=,n       0,t1,.Lcopy_dstaligned
 20:    ldb     0(srcspc,src),t1
        ldo     1(src),src
 21:    stb,ma  t1,1(dstspc,dst)
@@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
        cmpiclr,<> 1,t0,%r0
        b,n .Lcase1
 .Lcase0:
-       cmpb,= %r0,len,.Lcda_finish
+       cmpb,COND(=) %r0,len,.Lcda_finish
        nop
 
 1:     ldw,ma 4(srcspc,src), a3
@@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
 1:     ldw,ma 4(srcspc,src), a3
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        ldo -1(len),len
-       cmpb,=,n %r0,len,.Ldo0
+       cmpb,COND(=),n %r0,len,.Ldo0
 .Ldo4:
 1:     ldw,ma 4(srcspc,src), a0
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
@@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
 1:     stw,ma t0, 4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
        ldo -4(len),len
-       cmpb,<> %r0,len,.Ldo4
+       cmpb,COND(<>) %r0,len,.Ldo4
        nop
 .Ldo0:
        shrpw a2, a3, %sar, t0
@@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
        /* fault exception fixup handlers: */
 #ifdef CONFIG_64BIT
 .Lcopy16_fault:
-10:    b       .Lcopy_done
-       std,ma  t1,8(dstspc,dst)
+       b       .Lcopy_done
+10:    std,ma  t1,8(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 #endif
 
 .Lcopy8_fault:
-10:    b       .Lcopy_done
-       stw,ma  t1,4(dstspc,dst)
+       b       .Lcopy_done
+10:    stw,ma  t1,4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 
        .exit
index 14752eee3d0c44816a61b395970186c1e56d4a21..ed3beadd2cc515d1b8a99b4836b3f06c1a97a87c 100644 (file)
@@ -236,9 +236,9 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
        mtctr   reg;                                                    \
        bctr
 
-#define BRANCH_LINK_TO_FAR(reg, label)                                 \
-       __LOAD_FAR_HANDLER(reg, label);                                 \
-       mtctr   reg;                                                    \
+#define BRANCH_LINK_TO_FAR(label)                                      \
+       __LOAD_FAR_HANDLER(r12, label);                                 \
+       mtctr   r12;                                                    \
        bctrl
 
 /*
@@ -265,7 +265,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define BRANCH_TO_COMMON(reg, label)                                   \
        b       label
 
-#define BRANCH_LINK_TO_FAR(reg, label)                                 \
+#define BRANCH_LINK_TO_FAR(label)                                      \
        bl      label
 
 #define BRANCH_TO_KVM(reg, label)                                      \
index 6432d4bf08c889c128803cc5505546122118b964..767ef6d68c9ebf379dad6f065bcc0279a3021bb9 100644 (file)
@@ -689,7 +689,7 @@ resume_kernel:
 
        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
 
-       lwz     r3,GPR1(r1)
+       ld      r3,GPR1(r1)
        subi    r3,r3,INT_FRAME_SIZE    /* dst: Allocate a trampoline exception frame */
        mr      r4,r1                   /* src:  current exception frame */
        mr      r1,r3                   /* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@ resume_kernel:
        addi    r6,r6,8
        bdnz    2b
 
-       /* Do real store operation to complete stwu */
-       lwz     r5,GPR1(r1)
+       /* Do real store operation to complete stdu */
+       ld      r5,GPR1(r1)
        std     r8,0(r5)
 
        /* Clear _TIF_EMULATE_STACK_STORE flag */
index 857bf7c5b9465aff743b3e62094cc47d808c8c92..6353019966e6a3bd15afa32d7318c47810a1d8e9 100644 (file)
@@ -982,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
        EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
        EXCEPTION_PROLOG_COMMON_3(0xe60)
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode)
+       BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
        /* Windup the stack. */
        /* Move original HSRR0 and HSRR1 into the respective regs */
        ld      r9,_MSR(r1)
index 93e37b12e88237766821369e19827e5e2d844a1b..ecec682bb5166a41d9c86025fd3f4e9461f6ec71 100644 (file)
@@ -1051,6 +1051,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
        if (!MACHINE_HAS_NX)
                pte_val(entry) &= ~_PAGE_NOEXEC;
+       if (pte_present(entry))
+               pte_val(entry) &= ~_PAGE_UNUSED;
        if (mm_has_pgste(mm))
                ptep_set_pte_at(mm, addr, ptep, entry);
        else
index 68ac5c7cd982619581dfa65b75ac89aa8e359554..3db2543733a5874f7a8a5e76275eb505cc1c3db3 100644 (file)
@@ -43,7 +43,7 @@ config SPARC
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
        select HAVE_ARCH_HARDENED_USERCOPY
-       select PROVE_LOCKING_SMALL if PROVE_LOCKING
+       select LOCKDEP_SMALL if LOCKDEP
        select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
@@ -82,6 +82,7 @@ config SPARC64
        select HAVE_ARCH_AUDITSYSCALL
        select ARCH_SUPPORTS_ATOMIC_RMW
        select HAVE_NMI
+       select HAVE_REGS_AND_STACK_ACCESS_API
 
 config ARCH_DEFCONFIG
        string
index ca57f08bd3dba57e5e06e17de20eeee856e97bed..d73428e4333c980486561799aba8fc31ccc123b9 100644 (file)
@@ -83,7 +83,8 @@ unsigned long profile_pc(struct pt_regs *);
 
 #define MAX_REG_OFFSET (offsetof(struct pt_regs, magic))
 
-extern int regs_query_register_offset(const char *name);
+int regs_query_register_offset(const char *name);
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
 
 /**
  * regs_get_register() - get register value from its offset
index 36eee8132c22bac329e99fb7284211e11310ff26..ae77df75bffadd1e376a1baec7cb26286497ffb6 100644 (file)
 #define __NR_copy_file_range   357
 #define __NR_preadv2           358
 #define __NR_pwritev2          359
+#define __NR_statx             360
 
-#define NR_syscalls            360
+#define NR_syscalls            361
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
 #define __IGNORE_getresgid
 #endif
 
+/* Sparc doesn't have protection keys. */
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
+
 #endif /* _UAPI_SPARC_UNISTD_H */
index fc5124ccdb53c7abc43e381ba098c453892a7640..e1d965e90e1697a8205ca505dbf428cb37b30312 100644 (file)
@@ -1162,3 +1162,39 @@ int regs_query_register_offset(const char *name)
                        return roff->offset;
        return -EINVAL;
 }
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @addr:      address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs,
+                                          unsigned long addr)
+{
+       unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
+       return ((addr & ~(THREAD_SIZE - 1))  ==
+               (ksp & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @n:         stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
+ * specified by @regs. If the @n-th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+       unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
+       unsigned long *addr = (unsigned long *)ksp;
+       addr += n;
+       if (regs_within_kernel_stack(regs, (unsigned long)addr))
+               return *addr;
+       else
+               return 0;
+}
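
Together with the HAVE_REGS_AND_STACK_ACCESS_API selection in the sparc64 Kconfig hunk above, these accessors let generic code (kprobe event arguments such as $stackN, for instance) read raw kernel stack slots. A hedged usage sketch; the probe handler and its name are hypothetical:

    #include <linux/kprobes.h>
    #include <linux/ptrace.h>

    /* sketch: read the third 64-bit slot on the probed task's kernel stack */
    static int sample_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            unsigned long slot = regs_get_kernel_stack_nth(regs, 2);

            pr_info("stack[2] = %#lx\n", slot);	/* 0 if out of range */
            return 0;
    }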
index eac7f0db5c8c6269a913152a11941f66f5f3e8d0..5253e895b81b7214626e1afaf0c6157ed00519aa 100644 (file)
@@ -89,3 +89,4 @@ sys_call_table:
 /*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
 /*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
+/*360*/        .long sys_statx
index b0f17ff2ddba2daa75a8e8a646b5661dfe0d36e9..82339f6be0b2d56427a2b9e0f0e75ef5c8ab211a 100644 (file)
@@ -90,6 +90,7 @@ sys_call_table32:
        .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
        .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
+/*360*/        .word sys_statx
 
 #endif /* CONFIG_COMPAT */
 
@@ -171,3 +172,4 @@ sys_call_table:
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
        .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
+/*360*/        .word sys_statx
index ee5273ad918de6302cb0f6644b2a4179f8c26351..7c29d38e6b99c68c5ac746eb3d3a3fe1da8394a5 100644 (file)
@@ -461,6 +461,22 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
        pgd_t *pgd;
        unsigned long next;
 
+       addr &= PMD_MASK;
+       if (addr < floor) {
+               addr += PMD_SIZE;
+               if (!addr)
+                       return;
+       }
+       if (ceiling) {
+               ceiling &= PMD_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               end -= PMD_SIZE;
+       if (addr > end - 1)
+               return;
+
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
index a94a4d10f2dfa426d3746cfc9e528d8c91b7e824..49d160b781f09609f1c7917a8ee8853c40d6f99f 100644 (file)
@@ -154,6 +154,14 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
   else
     ifeq ($(call cc-option-yn, -mfentry), n)
        ACCUMULATE_OUTGOING_ARGS := 1
+
+       # GCC ignores '-maccumulate-outgoing-args' when used with '-Os'.
+       # If '-Os' is enabled, disable it and print a warning.
+        ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+          undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
+         $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE.  Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
+        endif
+
     endif
   endif
 endif
index 7853b53959cd35a8d555a5ddebfbb4af1f40562a..3f9d1a83891adf9f47cba058078b447fb1de7ecf 100644 (file)
@@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
 {
        vdso32_enabled = simple_strtoul(s, NULL, 0);
 
-       if (vdso32_enabled > 1)
+       if (vdso32_enabled > 1) {
                pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+               vdso32_enabled = 0;
+       }
 
        return 1;
 }
@@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
 
+static const int zero;
+static const int one = 1;
+
 static struct ctl_table abi_table2[] = {
        {
                .procname       = "vsyscall32",
                .data           = &vdso32_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = (int *)&zero,
+               .extra2         = (int *)&one,
        },
        {}
 };
index 9d05c7e67f6073e3441c164d1bdc6db390507ef0..a45e2114a8460925b44bd6e0983f3ee37e83d861 100644 (file)
@@ -761,7 +761,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
 
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
-       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsx_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
 
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
index 9d49c18b5ea9360feb5e5bb1fe378914154f34d5..3762536619f8cb83c17777e60bbd56830d792cde 100644 (file)
@@ -287,7 +287,7 @@ struct task_struct;
 
 #define        ARCH_DLINFO_IA32                                                \
 do {                                                                   \
-       if (vdso32_enabled) {                                           \
+       if (VDSO_CURRENT_BASE) {                                        \
                NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY);                    \
                NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);        \
        }                                                               \
index 2c1ebeb4d7376db6350b7266048a1d37d800ac86..529bb4a6487a9e42d07d0c4ceb226e41b5808f0f 100644 (file)
@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
  * @size:      number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
  */
 static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
                clwb(p);
 }
 
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
-       return iter_is_iovec(i) == false;
-}
-
 /**
  * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
  * @addr:      PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
        /* TODO: skip the write-back by always using non-temporal stores */
        len = copy_from_iter_nocache(addr, bytes, i);
 
-       if (__iter_needs_pmem_wb(i))
+       /*
+        * In the iovec case on x86_64 copy_from_iter_nocache() uses
+        * non-temporal stores for the bulk of the transfer, but we need
+        * to manually flush if the transfer is unaligned. A cached
+        * memory copy is used when destination or size is not naturally
+        * aligned. That is:
+        *   - Require 8-byte alignment when size is 8 bytes or larger.
+        *   - Require 4-byte alignment when size is 4 bytes.
+        *
+        * In the non-iovec case the entire destination needs to be
+        * flushed.
+        */
+       if (iter_is_iovec(i)) {
+               unsigned long flushed, dest = (unsigned long) addr;
+
+               if (bytes < 8) {
+                       if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+                               arch_wb_cache_pmem(addr, 1);
+               } else {
+                       if (!IS_ALIGNED(dest, 8)) {
+                               dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+                               arch_wb_cache_pmem(addr, 1);
+                       }
+
+                       flushed = dest - (unsigned long) addr;
+                       if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+                               arch_wb_cache_pmem(addr + bytes - 1, 1);
+               }
+       } else
                arch_wb_cache_pmem(addr, bytes);
 
        return len;
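
The alignment rules in the comment above reduce to two manual write-backs: one for an unaligned head (rounded out to a cache line) and one for a ragged tail. A self-contained userspace illustration with made-up values, re-defining IS_ALIGNED/ALIGN locally for the example:

    #include <stdio.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
    #define ALIGN(x, a)      (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long addr = 0x1002, bytes = 100, clflush = 64;
            unsigned long dest = addr, flushed;

            if (!IS_ALIGNED(dest, 8)) {
                    printf("flush head cache line\n");
                    dest = ALIGN(dest, clflush);	/* 0x1040 */
            }
            flushed = dest - addr;			/* 62 bytes */
            if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
                    printf("flush tail cache line\n");	/* 38 % 8 != 0 */
            return 0;
    }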
index f369cb8db0d5b4332125745dd5a587148f109597..badd2b31a5605f3df24dd250d1f08c9df022b7b2 100644 (file)
@@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
        }
 
 out:
-       rdtgroup_kn_unlock(of->kn);
        for_each_enabled_rdt_resource(r) {
                kfree(r->tmp_cbms);
                r->tmp_cbms = NULL;
        }
+       rdtgroup_kn_unlock(of->kn);
        return ret ?: nbytes;
 }
 
index 1e5a50c11d3c3546a59d286cbb84c402682cfb2b..217cd4449bc9db3aedfd6367529bc9ca99fb8443 100644 (file)
@@ -85,7 +85,7 @@ void mce_gen_pool_process(struct work_struct *__unused)
        head = llist_reverse_order(head);
        llist_for_each_entry_safe(node, tmp, head, llnode) {
                mce = &node->mce;
-               atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+               blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
                gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
        }
 }
index 903043e6a62b36a2c395c7aab52da6f85e34fc11..19592ba1a320030a4cbdc59aa8194f14bcefae69 100644 (file)
@@ -13,7 +13,7 @@ enum severity_level {
        MCE_PANIC_SEVERITY,
 };
 
-extern struct atomic_notifier_head x86_mce_decoder_chain;
+extern struct blocking_notifier_head x86_mce_decoder_chain;
 
 #define ATTR_LEN               16
 #define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */
index 5accfbdee3f06fac48eaf4423d66d7bee1e3a0a3..af44ebeb593fb60f70bb497de3e8f09b4b988333 100644 (file)
@@ -123,7 +123,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
  */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
 
 /* Do initial initialization of a struct mce */
 void mce_setup(struct mce *m)
@@ -220,7 +220,7 @@ void mce_register_decode_chain(struct notifier_block *nb)
 
        WARN_ON(nb->priority > MCE_PRIO_LOWEST && nb->priority < MCE_PRIO_EDAC);
 
-       atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+       blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
 }
 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 
@@ -228,7 +228,7 @@ void mce_unregister_decode_chain(struct notifier_block *nb)
 {
        atomic_dec(&num_notifiers);
 
-       atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
+       blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
 }
 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
 
@@ -321,18 +321,7 @@ static void __print_mce(struct mce *m)
 
 static void print_mce(struct mce *m)
 {
-       int ret = 0;
-
        __print_mce(m);
-
-       /*
-        * Print out human-readable details about the MCE error,
-        * (if the CPU has an implementation for that)
-        */
-       ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
-       if (ret == NOTIFY_STOP)
-               return;
-
        pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
index cbd73eb4217026f1ef39e913cceb04d5c1af4e3a..5b71535407279f3c4d70ce628f9bf5dc8561abea 100644 (file)
 #include <asm/ftrace.h>
 #include <asm/nops.h>
 
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && \
-       !defined(CC_USING_FENTRY) && \
-       !defined(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE)
-# error The following combination is not supported: ((compiler missing -mfentry) || (CONFIG_X86_32 and !CONFIG_DYNAMIC_FTRACE)) && CONFIG_FUNCTION_GRAPH_TRACER && CONFIG_CC_OPTIMIZE_FOR_SIZE
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 int ftrace_arch_code_modify_prepare(void)
@@ -989,6 +983,18 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;
 
+       /*
+        * When resuming from suspend-to-ram, this function can be indirectly
+        * called from early CPU startup code while the CPU is in real mode,
+        * which would fail miserably.  Make sure the stack pointer is a
+        * virtual address.
+        *
+        * This check isn't as accurate as virt_addr_valid(), but it should be
+        * good enough for this purpose, and it's fast.
+        */
+       if (unlikely((long)__builtin_frame_address(0) >= 0))
+               return;
+
        if (unlikely(ftrace_graph_is_dead()))
                return;
 
index 396c042e9d0ee58873de5c68e6e7e22186070147..cc30a74e4adb2c3499b52488c49498dea1851ae7 100644 (file)
@@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
                       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
                       me->comm, me->pid, where, frame,
                       regs->ip, regs->sp, regs->orig_ax);
-               print_vma_addr(" in ", regs->ip);
+               print_vma_addr(KERN_CONT " in ", regs->ip);
                pr_cont("\n");
        }
 
index ec1f756f9dc9ace1badccd544b6032d64e520360..71beb28600d4531d93a9966401a1ca082a9f0c2e 100644 (file)
@@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
 
                                if (from->si_signo == SIGSEGV) {
                                        if (from->si_code == SEGV_BNDERR) {
-                                               compat_uptr_t lower = (unsigned long)&to->si_lower;
-                                               compat_uptr_t upper = (unsigned long)&to->si_upper;
+                                               compat_uptr_t lower = (unsigned long)from->si_lower;
+                                               compat_uptr_t upper = (unsigned long)from->si_upper;
                                                put_user_ex(lower, &to->si_lower);
                                                put_user_ex(upper, &to->si_upper);
                                        }
index 948443e115c147c28a6445eca725fad698d1a56f..4e496379a871687281ddc0c69c0e10d7ec036f09 100644 (file)
@@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
                pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                        tsk->comm, tsk->pid, str,
                        regs->ip, regs->sp, error_code);
-               print_vma_addr(" in ", regs->ip);
+               print_vma_addr(KERN_CONT " in ", regs->ip);
                pr_cont("\n");
        }
 
@@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
                pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
                        tsk->comm, task_pid_nr(tsk),
                        regs->ip, regs->sp, error_code);
-               print_vma_addr(" in ", regs->ip);
+               print_vma_addr(KERN_CONT " in ", regs->ip);
                pr_cont("\n");
        }
 
index a8e91ae89fb3048c78aa4a279565070bb1958902..29df077cb0899be78cf604a3e21eb7262a729159 100644 (file)
@@ -93,6 +93,13 @@ static void delay_mwaitx(unsigned long __loops)
 {
        u64 start, end, delay, loops = __loops;
 
+       /*
+        * Timer value of 0 causes MWAITX to wait indefinitely, unless there
+        * is a store on the memory monitored by MONITORX.
+        */
+       if (loops == 0)
+               return;
+
        start = rdtsc_ordered();
 
        for (;;) {
index 22af912d66d258f413414c7355ac82da2814b074..889e7619a0914d87ad49dbb5b960933e5dd316ad 100644 (file)
@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-       if (pagenr < 256)
-               return 1;
-       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+       if (page_is_ram(pagenr)) {
+               /*
+                * For disallowed memory regions in the low 1MB range,
+                * request that the page be shown as all zeros.
+                */
+               if (pagenr < 256)
+                       return 2;
+
+               return 0;
+       }
+
+       /*
+        * This must follow RAM test, since System RAM is considered a
+        * restricted resource under CONFIG_STRICT_IOMEM.
+        */
+       if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+               /* Low 1MB bypasses iomem restrictions. */
+               if (pagenr < 256)
+                       return 1;
+
                return 0;
-       if (!page_is_ram(pagenr))
-               return 1;
-       return 0;
+       }
+
+       return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
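
devmem_is_allowed() is now a tri-state: 0 denies the access, 1 allows it, and 2 allows it but asks the caller to substitute zeros. The real consumer is the read_mem()/write_mem() rework in the drivers/char/mem.c hunk later in this patch; a minimal sketch of the read side, with a hypothetical helper name:

    #include <linux/uaccess.h>

    /* sketch: interpreting the tri-state on the read path */
    static ssize_t read_one_page(char __user *buf, unsigned long pfn,
                                 void *ptr, size_t sz)
    {
            switch (devmem_is_allowed(pfn)) {
            case 0:
                    return -EPERM;				/* restricted */
            case 2:
                    return clear_user(buf, sz) ? -EFAULT : sz;	/* show zeros */
            default:
                    return copy_to_user(buf, ptr, sz) ? -EFAULT : sz;
            }
    }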
index 30031d5293c483202c526d5045cda23be6617359..cdfe8c62895981029b8f69218b717b05d0b8961a 100644 (file)
@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
                return;
        }
 
+       /* No need to reserve regions that will never be freed. */
+       if (md.attribute & EFI_MEMORY_RUNTIME)
+               return;
+
        size += addr % EFI_PAGE_SIZE;
        size = round_up(size, EFI_PAGE_SIZE);
        addr = round_down(addr, EFI_PAGE_SIZE);
index 572966f495966b7fe8ce486ad4e5912a0d044eb5..c7836a1ded973e23026143edb95e36a4e86ad294 100644 (file)
@@ -2928,8 +2928,17 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
        hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
        if (!blk_qc_t_is_internal(cookie))
                rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-       else
+       else {
                rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
+               /*
+                * With scheduling, if the request has completed, we'll
+                * get a NULL return here, as we clear the sched tag when
+                * that happens. The request itself remains valid, as always,
+                * so we should be safe with just the NULL check.
+                */
+               if (!rq)
+                       return false;
+       }
 
        return __blk_mq_poll(hctx, rq);
 }
index dbeecf7be719eaada4457ed3c7ac799fd0bbacec..4d9084a14c1093b1c00d79f079663a643c5d05fe 100644 (file)
@@ -1098,12 +1098,20 @@ int elevator_change(struct request_queue *q, const char *name)
 }
 EXPORT_SYMBOL(elevator_change);
 
+static inline bool elv_support_iosched(struct request_queue *q)
+{
+       if (q->mq_ops && q->tag_set && (q->tag_set->flags &
+                               BLK_MQ_F_NO_SCHED))
+               return false;
+       return true;
+}
+
 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
 {
        int ret;
 
-       if (!(q->mq_ops || q->request_fn))
+       if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
                return count;
 
        ret = __elevator_change(q, name);
@@ -1135,7 +1143,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                        continue;
                }
-               if (__e->uses_mq && q->mq_ops)
+               if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
                        len += sprintf(name+len, "%s ", __e->elevator_name);
                else if (!__e->uses_mq && !q->mq_ops)
                        len += sprintf(name+len, "%s ", __e->elevator_name);
index e58c4970c22b7cc1cdb5e8f08875d9c1e7714568..826cd7ab4d4a2ec830438b40e987e230bd03f32f 100644 (file)
@@ -32,6 +32,7 @@ struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
+       u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -253,6 +254,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
+       priv->flags = req->base.flags;
+
        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
@@ -267,38 +270,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
        return 0;
 }
 
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
 {
        struct ahash_request_priv *priv = req->priv;
 
+       if (!err)
+               memcpy(priv->result, req->result,
+                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
        /* Restore the original crypto request. */
        req->result = priv->result;
-       req->base.complete = priv->complete;
-       req->base.data = priv->data;
+
+       ahash_request_set_callback(req, priv->flags,
+                                  priv->complete, priv->data);
        req->priv = NULL;
 
        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
 }
 
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
 {
        struct ahash_request_priv *priv = req->priv;
+       struct crypto_async_request oreq;
 
-       if (err == -EINPROGRESS)
-               return;
-
-       if (!err)
-               memcpy(priv->result, req->result,
-                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+       oreq.data = priv->data;
 
-       ahash_restore_req(req);
+       priv->complete(&oreq, -EINPROGRESS);
 }
 
 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 {
        struct ahash_request *areq = req->data;
 
+       if (err == -EINPROGRESS) {
+               ahash_notify_einprogress(areq);
+               return;
+       }
+
        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
@@ -309,7 +318,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
         */
 
        /* First copy req->result into req->priv.result */
-       ahash_op_unaligned_finish(areq, err);
+       ahash_restore_req(areq, err);
 
        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
@@ -325,7 +334,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
                return err;
 
        err = op(req);
-       ahash_op_unaligned_finish(req, err);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
+
+       ahash_restore_req(req, err);
 
        return err;
 }
@@ -360,25 +374,14 @@ int crypto_ahash_digest(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
 {
-       struct ahash_request_priv *priv = req->priv;
+       struct ahash_request *areq = req->data;
 
        if (err == -EINPROGRESS)
                return;
 
-       if (!err)
-               memcpy(priv->result, req->result,
-                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-       ahash_restore_req(req);
-}
-
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
-{
-       struct ahash_request *areq = req->data;
-
-       ahash_def_finup_finish2(areq, err);
+       ahash_restore_req(areq, err);
 
        areq->base.complete(&areq->base, err);
 }
@@ -389,11 +392,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
                goto out;
 
        req->base.complete = ahash_def_finup_done2;
-       req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
        err = crypto_ahash_reqtfm(req)->final(req);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
 
 out:
-       ahash_def_finup_finish2(req, err);
+       ahash_restore_req(req, err);
        return err;
 }
 
@@ -401,7 +408,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
 {
        struct ahash_request *areq = req->data;
 
+       if (err == -EINPROGRESS) {
+               ahash_notify_einprogress(areq);
+               return;
+       }
+
+       areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
        err = ahash_def_finup_finish1(areq, err);
+       if (areq->priv)
+               return;
 
        areq->base.complete(&areq->base, err);
 }
@@ -416,6 +432,11 @@ static int ahash_def_finup(struct ahash_request *req)
                return err;
 
        err = tfm->update(req);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
+
        return ahash_def_finup_finish1(req, err);
 }
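
The -EINPROGRESS / -EBUSY + CRYPTO_TFM_REQ_MAY_BACKLOG test that this patch adds after each async operation encodes the crypto API completion contract: -EINPROGRESS means the completion callback now owns the request, while -EBUSY with MAY_BACKLOG set means the request was queued and the callback will fire once with -EINPROGRESS when it actually starts and again with the final status. A hedged sketch of a callback honoring that contract; the function name is illustrative:

    #include <linux/crypto.h>

    /* sketch: completion callback for a possibly backlogged request */
    static void sample_done(struct crypto_async_request *base, int err)
    {
            if (err == -EINPROGRESS) {
                    /* backlogged request has now started; a second,
                     * final completion with the real status follows */
                    return;
            }
            /* final completion: restore state, free resources,
             * then propagate err to the original requester */
    }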
 
index 5a805375865731f4cc7e94790362c208dd58d05b..ef59d9926ee99bccfbdc6077451cb95008d07651 100644 (file)
@@ -40,6 +40,7 @@ struct aead_async_req {
        struct aead_async_rsgl first_rsgl;
        struct list_head list;
        struct kiocb *iocb;
+       struct sock *sk;
        unsigned int tsgls;
        char iv[];
 };
@@ -379,12 +380,10 @@ unlock:
 
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-       struct sock *sk = _req->data;
-       struct alg_sock *ask = alg_sk(sk);
-       struct aead_ctx *ctx = ask->private;
-       struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-       struct aead_request *req = aead_request_cast(_req);
+       struct aead_request *req = _req->data;
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+       struct sock *sk = areq->sk;
        struct scatterlist *sg = areq->tsgl;
        struct aead_async_rsgl *rsgl;
        struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
        memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
        INIT_LIST_HEAD(&areq->list);
        areq->iocb = msg->msg_iocb;
+       areq->sk = sk;
        memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
        aead_request_set_tfm(req, tfm);
        aead_request_set_ad(req, ctx->aead_assoclen);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                 aead_async_cb, sk);
+                                 aead_async_cb, req);
        used -= ctx->aead_assoclen;
 
        /* take over all tx sgls from ctx */
index 3ea095adafd9af0067f695c4c70e2af702f70923..a8bfae4451bfcbfd26e25bbb39280818dcc139d4 100644 (file)
@@ -345,6 +345,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -352,6 +359,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
@@ -389,6 +397,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -396,6 +411,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
index c976bfac29da526844f6a5578fea1455945c26f3..89ace5ebc2da88df3e049e89e1d2640b2e361e3d 100644 (file)
@@ -286,6 +286,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -293,6 +300,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
@@ -330,6 +338,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -337,6 +352,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
index c86bae7b1d0fc4d0fce5832a1f2716f59d91a7e6..ff096d9755b925d9f72105f42993ebcc7c0522e1 100644 (file)
@@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 
        ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
 
-       /*
-        * The absolute minimum resource template is one end_tag descriptor.
-        * However, we will treat a lone end_tag as just a simple buffer.
-        */
+       /* The absolute minimum resource template is one end_tag descriptor */
+
        if (aml_length < sizeof(struct aml_resource_end_tag)) {
                return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
        }
@@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
                /* Invoke the user function */
 
                if (user_function) {
-                       status = user_function(aml, length, offset,
-                                              resource_index, context);
+                       status =
+                           user_function(aml, length, offset, resource_index,
+                                         context);
                        if (ACPI_FAILURE(status)) {
                                return_ACPI_STATUS(status);
                        }
@@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
                                *context = aml;
                        }
 
-                       /* Check if buffer is defined to be longer than the resource length */
-
-                       if (aml_length > (offset + length)) {
-                               return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-                       }
-
                        /* Normal exit */
 
                        return_ACPI_STATUS(AE_OK);
index 662036bdc65eca8d531886cc658fd9829cc60c00..c8ea9d698cd0f30546d731df6429b3f556140a76 100644 (file)
@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
        const struct nfit_set_info_map *map0 = m0;
        const struct nfit_set_info_map *map1 = m1;
 
-       return map0->region_offset - map1->region_offset;
+       if (map0->region_offset < map1->region_offset)
+               return -1;
+       else if (map0->region_offset > map1->region_offset)
+               return 1;
+       return 0;
 }
 
 /* Retrieve the nth entry referencing this spa */
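
Returning the raw difference of two u64 region offsets truncates a 64-bit value to int, so sort() could see a zero or wrong-signed result for offsets that differ only in the high bits; the explicit three-way comparison above avoids that. A standalone demonstration of the truncation on the usual two's-complement ABIs (values are made up):

    #include <stdio.h>
    #include <stdint.h>

    static int bad_cmp(uint64_t a, uint64_t b)
    {
            return a - b;	/* 64-bit difference truncated to int */
    }

    int main(void)
    {
            printf("%d\n", bad_cmp(1ULL << 32, 0));	/* 0, though a > b */
            printf("%d\n", bad_cmp(0x80000000ULL, 0));	/* negative, though a > b */
            return 0;
    }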
index fcd4ce6f78d5d387080343d96738c3b7275d0324..1c2b846c577604d0b471e87d19cecea6caa286f9 100644 (file)
@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
                return -EINVAL;
 
        /* The state of the list is 'on' IFF all resources are 'on'. */
+       cur_state = 0;
        list_for_each_entry(entry, list, node) {
                struct acpi_power_resource *resource = entry->resource;
                acpi_handle handle = resource->device.handle;
index 192691880d55c9499e1d77e68058a56d8e5318f3..2433569b02ef5cf40dd82cebec6fc83186f4dc3b 100644 (file)
@@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
                return;
 
        device->flags.match_driver = true;
-       if (!ret) {
-               ret = device_attach(&device->dev);
-               if (ret < 0)
-                       return;
-
-               if (!ret && device->pnp.type.platform_id)
-                       acpi_default_enumeration(device);
+       if (ret > 0) {
+               acpi_device_set_enumerated(device);
+               goto ok;
        }
 
+       ret = device_attach(&device->dev);
+       if (ret < 0)
+               return;
+
+       if (ret > 0 || !device->pnp.type.platform_id)
+               acpi_device_set_enumerated(device);
+       else
+               acpi_default_enumeration(device);
+
  ok:
        list_for_each_entry(child, &device->children, node)
                acpi_bus_attach(child);
index 6c9aa95a9a050cc070ab222a382f5dabd55ec7ea..49d705c9f0f7b9c6b2ef2549769b6901438c2854 100644 (file)
@@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        };
        const struct ata_port_info *ppi[] = { &info, &info };
 
-       /* SB600/700 don't have secondary port wired */
-       if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
-               (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
-               ppi[1] = &ata_dummy_port_info;
-
        return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
                                      ATA_HOST_PARALLEL_SCAN);
 }
index 0636d84fbefe0acc889004b39e83c0137fc0f6a0..f3f538eec7b3bb85b682368ffb42496989c97263 100644 (file)
@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
                pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
        }
 
-       /* enable IRQ on hotplug */
-       pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
-       if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
-               dev_dbg(&pdev->dev,
-                       "enabling SATA hotplug (0x%x)\n",
-                       (int) tmp8);
-               tmp8 |= SATA_HOTPLUG;
-               pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+       if (board_id == vt6421) {
+               /* enable IRQ on hotplug */
+               pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+               if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+                       dev_dbg(&pdev->dev,
+                               "enabling SATA hotplug (0x%x)\n",
+                               (int) tmp8);
+                       tmp8 |= SATA_HOTPLUG;
+                       pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+               }
        }
 
        /*
index f96ab717534c4c8fe60e830c4020c6bd0517cf07..1d1dc11aa5faef6a9422faeb0fa7bd3a2732e860 100644 (file)
@@ -3969,7 +3969,7 @@ static int mtip_block_initialize(struct driver_data *dd)
        dd->tags.reserved_tags = 1;
        dd->tags.cmd_size = sizeof(struct mtip_cmd);
        dd->tags.numa_node = dd->numa_node;
-       dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
+       dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
        dd->tags.driver_data = dd;
        dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
 
index dceb5edd1e5455f4c1b101e8ad3ce4dba46ac22f..0c09d42561081ebb393398ae21a6900283d8f007 100644 (file)
@@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE) {
-               copy_page(mem, cmem);
+               memcpy(mem, cmem, PAGE_SIZE);
        } else {
                struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
 
@@ -717,7 +717,7 @@ compress_again:
 
        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
-               copy_page(cmem, src);
+               memcpy(cmem, src, PAGE_SIZE);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
@@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
        }
 
        index = sector >> SECTORS_PER_PAGE_SHIFT;
-       offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+       offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
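
Besides switching copy_page() to memcpy() for buffers that are not guaranteed to be page-aligned, this hunk fixes an operator-precedence bug: '<<' binds tighter than '&', so the old expression masked sector with (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT instead of masking first and then scaling. A small standalone check:

    #include <stdio.h>

    #define SECTOR_SHIFT     9
    #define SECTORS_PER_PAGE 8	/* 4K page / 512-byte sectors */

    int main(void)
    {
            unsigned long sector = 11;

            /* old: 11 & (7 << 9) == 0 -- wrong bits tested */
            printf("%lu\n", sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT);
            /* fixed: (11 & 7) << 9 == 1536 -- byte offset within the page */
            printf("%lu\n", (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT);
            return 0;
    }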
index 6d9cc2d39d22306fd68f30bac6f4a60e6cfa5a87..7e4a9d1296bb7fb666f6b37ced2757a8585b7d75 100644 (file)
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+       return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
        u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+       return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
        return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 
        while (count > 0) {
                unsigned long remaining;
+               int allowed;
 
                sz = size_inside_page(p, count);
 
-               if (!range_is_allowed(p >> PAGE_SHIFT, count))
+               allowed = page_is_allowed(p >> PAGE_SHIFT);
+               if (!allowed)
                        return -EPERM;
+               if (allowed == 2) {
+                       /* Show zeros for restricted memory. */
+                       remaining = clear_user(buf, sz);
+               } else {
+                       /*
+                        * On ia64 if a page has been mapped somewhere as
+                        * uncached, then it must also be accessed uncached
+                        * by the kernel or data corruption may occur.
+                        */
+                       ptr = xlate_dev_mem_ptr(p);
+                       if (!ptr)
+                               return -EFAULT;
 
-               /*
-                * On ia64 if a page has been mapped somewhere as uncached, then
-                * it must also be accessed uncached by the kernel or data
-                * corruption may occur.
-                */
-               ptr = xlate_dev_mem_ptr(p);
-               if (!ptr)
-                       return -EFAULT;
+                       remaining = copy_to_user(buf, ptr, sz);
+
+                       unxlate_dev_mem_ptr(p, ptr);
+               }
 
-               remaining = copy_to_user(buf, ptr, sz);
-               unxlate_dev_mem_ptr(p, ptr);
                if (remaining)
                        return -EFAULT;
 
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 #endif
 
        while (count > 0) {
+               int allowed;
+
                sz = size_inside_page(p, count);
 
-               if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+               allowed = page_is_allowed(p >> PAGE_SHIFT);
+               if (!allowed)
                        return -EPERM;
 
-               /*
-                * On ia64 if a page has been mapped somewhere as uncached, then
-                * it must also be accessed uncached by the kernel or data
-                * corruption may occur.
-                */
-               ptr = xlate_dev_mem_ptr(p);
-               if (!ptr) {
-                       if (written)
-                               break;
-                       return -EFAULT;
-               }
+               /* Skip actual writing when a page is marked as restricted. */
+               if (allowed == 1) {
+                       /*
+                        * On ia64 if a page has been mapped somewhere as
+                        * uncached, then it must also be accessed uncached
+                        * by the kernel or data corruption may occur.
+                        */
+                       ptr = xlate_dev_mem_ptr(p);
+                       if (!ptr) {
+                               if (written)
+                                       break;
+                               return -EFAULT;
+                       }
 
-               copied = copy_from_user(ptr, buf, sz);
-               unxlate_dev_mem_ptr(p, ptr);
-               if (copied) {
-                       written += sz - copied;
-                       if (written)
-                               break;
-                       return -EFAULT;
+                       copied = copy_from_user(ptr, buf, sz);
+                       unxlate_dev_mem_ptr(p, ptr);
+                       if (copied) {
+                               written += sz - copied;
+                               if (written)
+                                       break;
+                               return -EFAULT;
+                       }
                }
 
                buf += sz;
index e9b7e0b3cabe60d3be3ab8a092159b137854d8ec..87fe111d0be6b03ec8157f0a688046d490d7e887 100644 (file)
@@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
        vdev->config->reset(vdev);
 
-       virtqueue_disable_cb(portdev->c_ivq);
+       if (use_multiport(portdev))
+               virtqueue_disable_cb(portdev->c_ivq);
        cancel_work_sync(&portdev->control_work);
        cancel_work_sync(&portdev->config_work);
        /*
         * Once more: if control_work_handler() was running, it would
         * enable the cb as the last step.
         */
-       virtqueue_disable_cb(portdev->c_ivq);
+       if (use_multiport(portdev))
+               virtqueue_disable_cb(portdev->c_ivq);
        remove_controlq_data(portdev);
 
        list_for_each_entry(port, &portdev->ports, list) {
index ab609a76706f7bb0258ce47dda61366a0602d5cc..cf9449b3dbd9742bd8a3559c9939af9e057d9b5f 100644 (file)
@@ -429,6 +429,13 @@ static const struct clk_div_table pll_divp_table[] = {
        { 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 }
 };
 
+static const struct clk_div_table pll_divq_table[] = {
+       { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
+       { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, { 12, 12 }, { 13, 13 },
+       { 14, 14 }, { 15, 15 },
+       { 0 }
+};
+
 static const struct clk_div_table pll_divr_table[] = {
        { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
 };
@@ -496,9 +503,9 @@ struct stm32f4_div_data {
 
 #define MAX_PLL_DIV 3
 static const struct stm32f4_div_data  div_data[MAX_PLL_DIV] = {
-       { 16, 2, 0,                     pll_divp_table  },
-       { 24, 4, CLK_DIVIDER_ONE_BASED, NULL            },
-       { 28, 3, 0,                     pll_divr_table  },
+       { 16, 2, 0, pll_divp_table },
+       { 24, 4, 0, pll_divq_table },
+       { 28, 3, 0, pll_divr_table },
 };
 
 struct stm32f4_pll_data {
index 72109d2cf41b29da83dd88dbdb0e820364c893dd..a077ab6edffae759564362b85fd596887ea7e89a 100644 (file)
@@ -1,6 +1,7 @@
 config SUNXI_CCU
        bool "Clock support for Allwinner SoCs"
        depends on ARCH_SUNXI || COMPILE_TEST
+       select RESET_CONTROLLER
        default ARCH_SUNXI
 
 if SUNXI_CCU
@@ -15,7 +16,7 @@ config SUNXI_CCU_FRAC
        bool
 
 config SUNXI_CCU_GATE
-       bool
+       def_bool y
 
 config SUNXI_CCU_MUX
        bool
@@ -135,6 +136,7 @@ config SUN8I_V3S_CCU
 config SUN9I_A80_CCU
        bool "Support for the Allwinner A80 CCU"
        select SUNXI_CCU_DIV
+       select SUNXI_CCU_MULT
        select SUNXI_CCU_GATE
        select SUNXI_CCU_NKMP
        select SUNXI_CCU_NM
index a7b3c08ed0e232c0cf41ae419f4980bf1614590e..2c69b631967aea3ae81389d29b20e8014c3030e1 100644 (file)
@@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
        .num_resets     = ARRAY_SIZE(sun8i_a33_ccu_resets),
 };
 
+static struct ccu_pll_nb sun8i_a33_pll_cpu_nb = {
+       .common = &pll_cpux_clk.common,
+       /* copy from pll_cpux_clk */
+       .enable = BIT(31),
+       .lock   = BIT(28),
+};
+
 static struct ccu_mux_nb sun8i_a33_cpu_nb = {
        .common         = &cpux_clk.common,
        .cm             = &cpux_clk.mux,
@@ -783,6 +790,10 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
 
        sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
 
+       /* Gate then ungate PLL CPU after any rate changes */
+       ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
+
+       /* Reparent CPU during PLL CPU rate changes */
        ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
                                  &sun8i_a33_cpu_nb);
 }
index 8a47bafd78905bce849235d791f1469d448afcc9..9d8724715a4352ddd07a411945bc1cd58367053d 100644 (file)
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/iopoll.h>
 #include <linux/slab.h>
 
 #include "ccu_common.h"
+#include "ccu_gate.h"
 #include "ccu_reset.h"
 
 static DEFINE_SPINLOCK(ccu_lock);
@@ -39,6 +41,53 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
        WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
 }
 
+/*
+ * This clock notifier is called when the frequency of a PLL clock is
+ * changed. In common PLL designs, changes to the dividers take effect
+ * almost immediately, while changes to the multipliers (implemented
+ * as dividers in the feedback loop) take a few cycles to work into
+ * the feedback loop for the PLL to stabilize.
+ *
+ * Sometimes when the PLL clock rate is changed, the decrease in the
+ * divider is too much for the decrease in the multiplier to catch up.
+ * The PLL clock rate will spike, and in some cases, might lock up
+ * completely.
+ *
+ * This notifier callback will gate and then ungate the clock,
+ * effectively resetting it, so it proceeds to work. Care must be
+ * taken to reparent consumers to other temporary clocks during the
+ * rate change, and this notifier callback must be the first
+ * to be registered.
+ */
+static int ccu_pll_notifier_cb(struct notifier_block *nb,
+                              unsigned long event, void *data)
+{
+       struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
+       int ret = 0;
+
+       if (event != POST_RATE_CHANGE)
+               goto out;
+
+       ccu_gate_helper_disable(pll->common, pll->enable);
+
+       ret = ccu_gate_helper_enable(pll->common, pll->enable);
+       if (ret)
+               goto out;
+
+       ccu_helper_wait_for_lock(pll->common, pll->lock);
+
+out:
+       return notifier_from_errno(ret);
+}
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
+{
+       pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;
+
+       return clk_notifier_register(pll_nb->common->hw.clk,
+                                    &pll_nb->clk_nb);
+}
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
                    const struct sunxi_ccu_desc *desc)
 {
index 73d81dc58fc5ad91f8a293530aa89e2b22fcbdb3..d6fdd7a789aa746a72f939fb51c552004941fd37 100644 (file)
@@ -83,6 +83,18 @@ struct sunxi_ccu_desc {
 
 void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock);
 
+struct ccu_pll_nb {
+       struct notifier_block   clk_nb;
+       struct ccu_common       *common;
+
+       u32     enable;
+       u32     lock;
+};
+
+#define to_ccu_pll_nb(_nb) container_of(_nb, struct ccu_pll_nb, clk_nb)
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb);
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
                    const struct sunxi_ccu_desc *desc);
 
index bc96d423781aa8a300725f8fbe0a052be12cd4b5..0e3f6496524d92c7c1717d8d2259684952d7acb8 100644 (file)
@@ -2398,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
  *********************************************************************/
 static enum cpuhp_state hp_online;
 
+static int cpuhp_cpufreq_online(unsigned int cpu)
+{
+       cpufreq_online(cpu);
+
+       return 0;
+}
+
+static int cpuhp_cpufreq_offline(unsigned int cpu)
+{
+       cpufreq_offline(cpu);
+
+       return 0;
+}
+
 /**
  * cpufreq_register_driver - register a CPU Frequency driver
  * @driver_data: A struct cpufreq_driver containing the values
@@ -2460,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        }
 
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
-                                       cpufreq_online,
-                                       cpufreq_offline);
+                                       cpuhp_cpufreq_online,
+                                       cpuhp_cpufreq_offline);
        if (ret < 0)
                goto err_if_unreg;
        hp_online = ret;
index 3e2ab3b14eea205f19e8b436291e5117cec9567d..9e95bf94eb13ff2e830905694e0ce4c045fc76b1 100644 (file)
@@ -2,6 +2,7 @@ menuconfig DEV_DAX
        tristate "DAX: direct access to differentiated memory"
        default m if NVDIMM_DAX
        depends on TRANSPARENT_HUGEPAGE
+       select SRCU
        help
          Support raw access to differentiated (persistence, bandwidth,
          latency...) memory via an mmap(2) capable character
index 80c6db279ae10cb8558b2e90a91a4c4dafa917e0..806f180c80d816b313319f960479a7ad2848c672 100644 (file)
@@ -25,6 +25,7 @@
 #include "dax.h"
 
 static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
 static struct class *dax_class;
 static DEFINE_IDA(dax_minor_ida);
 static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
  * @region - parent region
  * @dev - device backing the character device
  * @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
  * @id - child id in the region
  * @num_resources - number of physical address extents in this device
  * @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 static int dax_dev_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
 {
-       int rc;
+       int rc, id;
        struct file *filp = vmf->vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;
 
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
                        ? "write" : "read",
                        vmf->vma->vm_start, vmf->vma->vm_end);
 
-       rcu_read_lock();
+       id = srcu_read_lock(&dax_srcu);
        switch (pe_size) {
        case PE_SIZE_PTE:
                rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
        default:
                return VM_FAULT_FALLBACK;
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&dax_srcu, id);
 
        return rc;
 }
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
         * Note, rcu is not protecting the liveness of dax_dev, rcu is
         * ensuring that any fault handlers that might have seen
         * dax_dev->alive == true, have completed.  Any fault handlers
-        * that start after synchronize_rcu() has started will abort
+        * that start after synchronize_srcu() has started will abort
         * upon seeing dax_dev->alive == false.
         */
        dax_dev->alive = false;
-       synchronize_rcu();
+       synchronize_srcu(&dax_srcu);
        unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
        cdev_del(cdev);
        device_unregister(dev);
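
The switch from rcu to srcu keeps the same teardown pattern, mark the device dead and then wait for every reader that might have seen it alive, while allowing the fault handlers inside the read-side section to sleep, which plain rcu_read_lock() would forbid. A stripped-down sketch of the pattern, with hypothetical names:

    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(obj_srcu);

    struct obj {
            bool alive;
    };

    static int obj_use(struct obj *o)
    {
            int id = srcu_read_lock(&obj_srcu);
            int ret = o->alive ? 0 : -ENXIO;	/* may sleep in here */

            srcu_read_unlock(&obj_srcu, id);
            return ret;
    }

    static void obj_teardown(struct obj *o)
    {
            o->alive = false;
            synchronize_srcu(&obj_srcu);	/* readers that saw alive==true are done */
            /* now safe to unmap and free */
    }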
index 932742e4cf23147e304d6df2a1c8f37c8f89d296..24c461dea7afb146a509e097b581aa2fdaede132 100644 (file)
@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
                status = __gop_query32(sys_table_arg, gop32, &info, &size,
                                       &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+               if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+                   info->pixel_format != PIXEL_BLT_ONLY) {
                        /*
                         * Systems that use the UEFI Console Splitter may
                         * provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
                status = __gop_query64(sys_table_arg, gop64, &info, &size,
                                       &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+               if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+                   info->pixel_format != PIXEL_BLT_ONLY) {
                        /*
                         * Systems that use the UEFI Console Splitter may
                         * provide multiple GOP devices, not all of which are
index da48819ff2e6550c0a7d6206569d85e8a880c0c5..b78d9239e48fb0fc3b02129fe97795a6b94bed70 100644 (file)
@@ -1317,7 +1317,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        if (!fence) {
                event_free(gpu, event);
                ret = -ENOMEM;
-               goto out_pm_put;
+               goto out_unlock;
        }
 
        gpu->event[event].fence = fence;
@@ -1357,6 +1357,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        hangcheck_timer_reset(gpu);
        ret = 0;
 
+out_unlock:
        mutex_unlock(&gpu->lock);
 
 out_pm_put:
index b7d7721e72faddc2a2d4fc76d69d795b8053cfad..40af17ec6312533d4080cc1581faa4683a0405b9 100644 (file)
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
        int ret;
 
-       if (vgpu->failsafe)
-               return 0;
-
        if (WARN_ON(bytes > 4))
                return -EINVAL;
 
index f1f426a97aa9d43826010d7be90f6bffdbe59426..d186c157f65fefe3c64b45b86f0ffa6ab824df2f 100644 (file)
@@ -775,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
                        _EL_OFFSET_STATUS_PTR);
 
        ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
-       ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+       ctx_status_ptr.read_ptr = 0;
+       ctx_status_ptr.write_ptr = 0x7;
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
index 933a7c211a1c29ab77357119e37b0de2bb3dd521..dce8d15f706f58b4cf1019ddc1d5b609c903980c 100644 (file)
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        struct gvt_firmware_header *h;
        void *firmware;
        void *p;
-       unsigned long size;
+       unsigned long size, crc32_start;
        int i;
        int ret;
 
-       size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+       size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
        firmware = vzalloc(size);
        if (!firmware)
                return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
        memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
+       crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+       h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
+
        firmware_attr.size = size;
        firmware_attr.private = firmware;
 
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
        firmware->mmio = mem;
 
-       sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+       sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
                 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
                 pdev->revision);
 
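
The crc32 hunk above checksums everything in the firmware image that sits after the crc field itself, using offsetof() plus the field width to find the start. A userspace sketch of the same idiom, assuming zlib's crc32() as the checksum (struct layout hypothetical):

    #include <stddef.h>
    #include <stdint.h>
    #include <zlib.h>   /* crc32(); link with -lz */

    struct fw_header {
            uint32_t magic;
            uint32_t version;
            uint32_t crc32_field;   /* covers everything after this member */
            uint32_t data_size;
            /* payload follows in the same allocation */
    };

    /* Checksum the image starting just past the crc field, mirroring the
     * crc32_start = offsetof(...) + 4 idiom in the patch (sizeof is used
     * here instead of the literal 4). */
    static uint32_t fw_checksum(const void *image, size_t size)
    {
            size_t start = offsetof(struct fw_header, crc32_field) +
                           sizeof(uint32_t);

            return (uint32_t)crc32(0L,
                                   (const unsigned char *)image + start,
                                   (unsigned int)(size - start));
    }
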
index 3b9d59e457ba7dbf2a1baffff7cd4f4c9aa75f3f..ef3baa0c4754566319a4706d50a77d2e1c6e2255 100644 (file)
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .vgpu_create = intel_gvt_create_vgpu,
        .vgpu_destroy = intel_gvt_destroy_vgpu,
        .vgpu_reset = intel_gvt_reset_vgpu,
+       .vgpu_activate = intel_gvt_activate_vgpu,
+       .vgpu_deactivate = intel_gvt_deactivate_vgpu,
 };
 
 /**
index 6dfc48b63b718b4c4e6f5c62794db5ce279b18a4..becae2fa3b29d9956cf69469968c6a1c7c74b770 100644 (file)
@@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                                 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
-
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
 
 /* validating GM functions */
 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +450,8 @@ struct intel_gvt_ops {
                                struct intel_vgpu_type *);
        void (*vgpu_destroy)(struct intel_vgpu *);
        void (*vgpu_reset)(struct intel_vgpu *);
+       void (*vgpu_activate)(struct intel_vgpu *);
+       void (*vgpu_deactivate)(struct intel_vgpu *);
 };
 
 
index d641214578a7dc6631e866bbc91c2d38f3e95a76..e466259034e24b2c62b82265978298c3500b79c5 100644 (file)
@@ -544,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        if (ret)
                goto undo_group;
 
+       intel_gvt_ops->vgpu_activate(vgpu);
+
        atomic_set(&vgpu->vdev.released, 0);
        return ret;
 
@@ -569,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
        if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
                return;
 
+       intel_gvt_ops->vgpu_deactivate(vgpu);
+
        ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
        WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -1340,13 +1344,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-       struct intel_vgpu *vgpu = info->vgpu;
-
-       if (!info) {
-               gvt_vgpu_err("kvmgt_guest_info invalid\n");
-               return false;
-       }
-
        kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
        kvm_put_kvm(info->kvm);
        kvmgt_protect_table_destroy(info);
index 41cfa5ccae84ce4020c6b2ff4a051ce8e17f6298..649ef280cc9a5bc10f4bebdc2f43c27e5249bd7f 100644 (file)
@@ -72,7 +72,7 @@ static struct {
        char *name;
 } vgpu_types[] = {
 /* Fixed vGPU type table */
-       { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+       { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
        { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
        { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
        { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
@@ -179,20 +179,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 }
 
 /**
- * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * intel_gvt_activate_vgpu - activate a virtual GPU
  * @vgpu: virtual GPU
  *
- * This function is called when user wants to destroy a virtual GPU.
+ * This function is called when user wants to activate a virtual GPU.
  *
  */
-void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+       mutex_lock(&vgpu->gvt->lock);
+       vgpu->active = true;
+       mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to deactivate a virtual GPU.
+ * All virtual GPU runtime information will be destroyed.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
 
        mutex_lock(&gvt->lock);
 
        vgpu->active = false;
-       idr_remove(&gvt->vgpu_idr, vgpu->id);
 
        if (atomic_read(&vgpu->running_workload_num)) {
                mutex_unlock(&gvt->lock);
@@ -201,6 +215,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        }
 
        intel_vgpu_stop_schedule(vgpu);
+
+       mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+
+       mutex_lock(&gvt->lock);
+
+       WARN(vgpu->active, "vGPU is still active!\n");
+
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
        intel_vgpu_clean_sched_policy(vgpu);
        intel_vgpu_clean_gvt_context(vgpu);
        intel_vgpu_clean_execlist(vgpu);
@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_shadow_ctx;
 
-       vgpu->active = true;
        mutex_unlock(&gvt->lock);
 
        return vgpu;
index 1c75402a59c1377e7abd410f2dc58dcec0df7094..5c089b3c2a7efdb29343de77a43dfc1e1f720057 100644 (file)
@@ -1434,8 +1434,6 @@ static int i915_drm_suspend(struct drm_device *dev)
                goto out;
        }
 
-       intel_guc_suspend(dev_priv);
-
        intel_display_suspend(dev);
 
        intel_dp_mst_suspend(dev);
index 1e53c31b6826ec996b2d153e1dd32232b77dd9d7..46fcd8b7080aafca8d589ca25ef6d57a9dc27a48 100644 (file)
@@ -806,6 +806,7 @@ struct intel_csr {
        func(has_resource_streamer); \
        func(has_runtime_pm); \
        func(has_snoop); \
+       func(unfenced_needs_alignment); \
        func(cursor_needs_physical); \
        func(hws_needs_physical); \
        func(overlay_needs_physical); \
index 67b1fc5a03313b80bc9459543b8dec3743ea953b..fe531f90406241bfa2cfc89e6155f1ef25802404 100644 (file)
@@ -4348,6 +4348,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
        i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
+       intel_guc_suspend(dev_priv);
+
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 
index 30e0675fd7dab7949d3cb3cb498be852d7448ac9..15a15d00a6bfa07cbe93ac2669cefaee3cb5ed2c 100644 (file)
@@ -888,6 +888,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
        struct list_head ordered_vmas;
        struct list_head pinned_vmas;
        bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
+       bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
        int retry;
 
        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -908,7 +909,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
                if (!has_fenced_gpu_access)
                        entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
                need_fence =
-                       entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+                       (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+                        needs_unfenced_map) &&
                        i915_gem_object_is_tiled(obj);
                need_mappable = need_fence || need_reloc_mappable(vma);
 
index 2801a4d5632491787009c4ae62130dc732215136..96e45a4d54410b085191e09ab33c6f4ceca0100a 100644 (file)
@@ -2704,7 +2704,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+               if (i915_gem_wait_for_idle(dev_priv, 0)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
index e7c3c0318ff60f2bf60b3c5afce405d11ce54a5c..da70bfe97ec5843adbdac276e5c86fa608266087 100644 (file)
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+       /* The timeline struct (as part of the ppgtt underneath a context)
+        * may be freed when the request is no longer in use by the GPU.
+        * We could extend the life of a context to beyond that of all
+        * fences, possibly keeping the hw resource around indefinitely,
+        * or we just give them a false name. Since
+        * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+        * lie seems justifiable.
+        */
+       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+               return "signaled";
+
        return to_request(fence)->timeline->common->name;
 }
 
index d5d2b4c6ed382d687719a088d943580ccacbba15..70b3832a79dd40d066a6f2deb34cbc6d436559e1 100644 (file)
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
        BUG();
 }
 
+static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+{
+       if (!unlock)
+               return;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       /* expedite the RCU grace period to free some request slabs */
+       synchronize_rcu_expedited();
+}
+
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                intel_runtime_pm_put(dev_priv);
 
        i915_gem_retire_requests(dev_priv);
-       if (unlock)
-               mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       /* expedite the RCU grace period to free some request slabs */
-       synchronize_rcu_expedited();
+       i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
 
        return count;
 }
@@ -293,8 +301,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
                        count += obj->base.size >> PAGE_SHIFT;
        }
 
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
+       i915_gem_shrinker_unlock(dev, unlock);
 
        return count;
 }
@@ -321,8 +328,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                                         sc->nr_to_scan - freed,
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
+
+       i915_gem_shrinker_unlock(dev, unlock);
 
        return freed;
 }
@@ -364,8 +371,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
                                         struct shrinker_lock_uninterruptible *slu)
 {
        dev_priv->mm.interruptible = slu->was_interruptible;
-       if (slu->unlock)
-               mutex_unlock(&dev_priv->drm.struct_mutex);
+       i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
 }
 
 static int
index ecb487b5356fe68696b19d3054dc61339a61f406..9bbbd4e83e3c5d99cb4cdc9d4422f586159826bf 100644 (file)
@@ -60,6 +60,7 @@
        .has_overlay = 1, .overlay_needs_physical = 1, \
        .has_gmch_display = 1, \
        .hws_needs_physical = 1, \
+       .unfenced_needs_alignment = 1, \
        .ring_mask = RENDER_RING, \
        GEN_DEFAULT_PIPEOFFSETS, \
        CURSOR_OFFSETS
@@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = {
        .platform = INTEL_I915G, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i915gm_info = {
@@ -112,6 +114,7 @@ static const struct intel_device_info intel_i915gm_info = {
        .supports_tv = 1,
        .has_fbc = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945g_info = {
@@ -120,6 +123,7 @@ static const struct intel_device_info intel_i945g_info = {
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945gm_info = {
@@ -130,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
        .supports_tv = 1,
        .has_fbc = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
index a1b7eec58be2742e6d94e5566b09fbb61e54df9d..70964ca9251e04939225c87a773e9a6bf760b1ac 100644 (file)
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
         */
        if (WARN_ON(stream->sample_flags != props->sample_flags)) {
                ret = -ENODEV;
-               goto err_alloc;
+               goto err_flags;
        }
 
        list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
 
 err_open:
        list_del(&stream->link);
+err_flags:
        if (stream->ops->destroy)
                stream->ops->destroy(stream);
 err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
                if (ret)
                        return ret;
 
+               if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
+                       DRM_DEBUG("Unknown i915 perf property ID\n");
+                       return -EINVAL;
+               }
+
                switch ((enum drm_i915_perf_property_id)id) {
                case DRM_I915_PERF_PROP_CTX_HANDLE:
                        props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
                        props->oa_periodic = true;
                        props->oa_period_exponent = value;
                        break;
-               default:
+               case DRM_I915_PERF_PROP_MAX:
                        MISSING_CASE(id);
-                       DRM_DEBUG("Unknown i915 perf property ID\n");
                        return -EINVAL;
                }
 
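
The property-parsing change above moves validation ahead of the switch: unknown IDs are rejected once up front, and the default: label is replaced by an explicit case for the _MAX sentinel, so the compiler's switch-coverage warning fires when a new property is added but not handled. A standalone sketch of that structure (enum values hypothetical):

    #include <stdio.h>

    /* 0 is reserved as invalid and the _MAX sentinel stays last. */
    enum prop_id {
            PROP_INVALID = 0,
            PROP_CTX_HANDLE,
            PROP_OA_EXPONENT,
            PROP_MAX
    };

    static int apply_prop(unsigned long long id, unsigned long long value)
    {
            /* Range-check first; the switch below then enumerates every
             * valid case explicitly (no default:), so -Wswitch flags a
             * newly added enum member the code forgot to handle. */
            if (id == 0 || id >= PROP_MAX) {
                    fprintf(stderr, "unknown property ID %llu\n", id);
                    return -1;
            }

            switch ((enum prop_id)id) {
            case PROP_CTX_HANDLE:
                    return 0;
            case PROP_OA_EXPONENT:
                    return value < 32 ? 0 : -1;
            case PROP_INVALID:
            case PROP_MAX:
                    break;  /* unreachable after the range check */
            }
            return -1;
    }
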
index 471af3b480adc38a3a48c27d1999805836cf5630..47517a02f0a439125b3b3a769e6848a4c4928ca2 100644 (file)
@@ -670,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 static struct intel_engine_cs *
 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 {
-       struct intel_engine_cs *engine;
+       struct intel_engine_cs *engine =
+               container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+
+       GEM_BUG_ON(!locked);
 
-       engine = container_of(pt,
-                             struct drm_i915_gem_request,
-                             priotree)->engine;
        if (engine != locked) {
-               if (locked)
-                       spin_unlock_irq(&locked->timeline->lock);
-               spin_lock_irq(&engine->timeline->lock);
+               spin_unlock(&locked->timeline->lock);
+               spin_lock(&engine->timeline->lock);
        }
 
        return engine;
@@ -686,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 
 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 {
-       struct intel_engine_cs *engine = NULL;
+       struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        LIST_HEAD(dfs);
@@ -720,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
        list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
                struct i915_priotree *pt = dep->signaler;
 
-               list_for_each_entry(p, &pt->signalers_list, signal_link)
+               /* Within an engine there can be no cycle, but we may
+                * refer to the same dependency chain multiple times
+                * (redundant dependencies are not eliminated), and
+                * chains may cross engines.
+                */
+               list_for_each_entry(p, &pt->signalers_list, signal_link) {
+                       GEM_BUG_ON(p->signaler->priority < pt->priority);
                        if (prio > READ_ONCE(p->signaler->priority))
                                list_move_tail(&p->dfs_link, &dfs);
+               }
 
                list_safe_reset_next(dep, p, dfs_link);
-               if (!RB_EMPTY_NODE(&pt->node))
-                       continue;
-
-               engine = pt_lock_engine(pt, engine);
-
-               /* If it is not already in the rbtree, we can update the
-                * priority inplace and skip over it (and its dependencies)
-                * if it is referenced *again* as we descend the dfs.
-                */
-               if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
-                       pt->priority = prio;
-                       list_del_init(&dep->dfs_link);
-               }
        }
 
+       engine = request->engine;
+       spin_lock_irq(&engine->timeline->lock);
+
        /* Fifo and depth-first replacement ensure our deps execute before us */
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                struct i915_priotree *pt = dep->signaler;
@@ -751,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
                if (prio <= pt->priority)
                        continue;
 
-               GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
-
                pt->priority = prio;
-               rb_erase(&pt->node, &engine->execlist_queue);
-               if (insert_request(pt, &engine->execlist_queue))
-                       engine->execlist_first = &pt->node;
+               if (!RB_EMPTY_NODE(&pt->node)) {
+                       rb_erase(&pt->node, &engine->execlist_queue);
+                       if (insert_request(pt, &engine->execlist_queue))
+                               engine->execlist_first = &pt->node;
+               }
        }
 
-       if (engine)
-               spin_unlock_irq(&engine->timeline->lock);
+       spin_unlock_irq(&engine->timeline->lock);
 
        /* XXX Do we need to preempt to make room for us and our deps? */
 }
@@ -1440,7 +1435,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
        GEM_BUG_ON(request->ctx != port[0].request->ctx);
 
        /* Reset WaIdleLiteRestore:bdw,skl as well */
-       request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
+       request->tail =
+               intel_ring_wrap(request->ring,
+                               request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
index 13dccb18cd43ed85aee58a6a15618c515094a83b..8cb2078c5bfc4abc7aeaa7fe51974266edcc8016 100644 (file)
@@ -521,11 +521,17 @@ static inline void intel_ring_advance(struct intel_ring *ring)
         */
 }
 
+static inline u32
+intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+       return pos & (ring->size - 1);
+}
+
 static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
 {
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
        u32 offset = addr - ring->vaddr;
-       return offset & (ring->size - 1);
+       return intel_ring_wrap(ring, offset);
 }
 
 int __intel_ring_space(int head, int tail, int size);
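
intel_ring_wrap() is plain power-of-two masking, but the subtle point in the reset_common_ring() hunk is that the mask also folds positions that have wrapped below zero, so tail - WA_TAIL_DWORDS*sizeof(u32) near the start of the ring lands back in range. A standalone illustration:

    #include <assert.h>
    #include <stdint.h>

    /* Fold a byte position back into a power-of-two ring, as the new
     * intel_ring_wrap() helper does. */
    static inline uint32_t ring_wrap(uint32_t size, uint32_t pos)
    {
            assert((size & (size - 1)) == 0);  /* size: power of two */
            return pos & (size - 1);
    }

    int main(void)
    {
            assert(ring_wrap(4096, 4100) == 4);
            /* 8 - 16 underflows to 0xfffffff8; masking yields size - 8 */
            assert(ring_wrap(4096, (uint32_t)8 - 16) == 4096 - 8);
            return 0;
    }
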
index 0b4440ffbeae21a3d33e67ed1c727e00cf3884b3..a9182d5e60117321a1a354b9b97288d11cc13cf4 100644 (file)
@@ -995,7 +995,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
 {
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
        __drm_atomic_helper_plane_destroy_state(&asyw->state);
-       dma_fence_put(asyw->state.fence);
        kfree(asyw);
 }
 
@@ -1007,7 +1006,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
        if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
                return NULL;
        __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
-       asyw->state.fence = NULL;
        asyw->interval = 1;
        asyw->sema = armw->sema;
        asyw->ntfy = armw->ntfy;
@@ -2036,6 +2034,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
        u32 vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
        u32 hfrontp =  mode->hsync_start - mode->hdisplay;
        u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+       u32 blankus;
        struct nv50_head_mode *m = &asyh->mode;
 
        m->h.active = mode->htotal;
@@ -2049,9 +2048,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
        m->v.blanks = m->v.active - vfrontp - 1;
 
        /*XXX: Safe underestimate, even "0" works */
-       m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
-       m->v.blankus *= 1000;
-       m->v.blankus /= mode->clock;
+       blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+       blankus *= 1000;
+       blankus /= mode->clock;
+       m->v.blankus = blankus;
 
        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                m->v.blank2e =  m->v.active + m->v.synce + vbackp;
index 273562dd6bbdb1a138c2e73417fa0e071716749b..3b86a73995672220b5e71b0f39898129782abf28 100644 (file)
@@ -714,7 +714,7 @@ nv4a_chipset = {
        .i2c = nv04_i2c_new,
        .imem = nv40_instmem_new,
        .mc = nv44_mc_new,
-       .mmu = nv44_mmu_new,
+       .mmu = nv04_mmu_new,
        .pci = nv40_pci_new,
        .therm = nv40_therm_new,
        .timer = nv41_timer_new,
@@ -2271,6 +2271,35 @@ nv136_chipset = {
        .fifo = gp100_fifo_new,
 };
 
+static const struct nvkm_device_chip
+nv137_chipset = {
+       .name = "GP107",
+       .bar = gf100_bar_new,
+       .bios = nvkm_bios_new,
+       .bus = gf100_bus_new,
+       .devinit = gm200_devinit_new,
+       .fb = gp102_fb_new,
+       .fuse = gm107_fuse_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .ltc = gp100_ltc_new,
+       .mc = gp100_mc_new,
+       .mmu = gf100_mmu_new,
+       .pci = gp100_pci_new,
+       .pmu = gp102_pmu_new,
+       .timer = gk20a_timer_new,
+       .top = gk104_top_new,
+       .ce[0] = gp102_ce_new,
+       .ce[1] = gp102_ce_new,
+       .ce[2] = gp102_ce_new,
+       .ce[3] = gp102_ce_new,
+       .disp = gp102_disp_new,
+       .dma = gf119_dma_new,
+       .fifo = gp100_fifo_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -2708,6 +2737,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x132: device->chip = &nv132_chipset; break;
                case 0x134: device->chip = &nv134_chipset; break;
                case 0x136: device->chip = &nv136_chipset; break;
+               case 0x137: device->chip = &nv137_chipset; break;
                default:
                        nvdev_error(device, "unknown chipset (%08x)\n", boot0);
                        goto done;
index 003ac915eaadad44c5b0ad2c1bbab9d18377596e..8a8895246d26a23be65eb6fb29cbb001f71f728b 100644 (file)
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
                }
 
                if (type == 0x00000010) {
-                       if (!nv31_mpeg_mthd(mpeg, mthd, data))
+                       if (nv31_mpeg_mthd(mpeg, mthd, data))
                                show &= ~0x01000000;
                }
        }
index e536f37e24b0c75fbf7882eccae37c5b1d0aeae8..c3cf02ed468ea1ccf6212a5e9c42dbee00fa612e 100644 (file)
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
                }
 
                if (type == 0x00000010) {
-                       if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+                       if (nv44_mpeg_mthd(subdev->device, mthd, data))
                                show &= ~0x01000000;
                }
        }
index 917dcb978c2ccc921c1dfcf90329973ff3044b59..0c87b1ac6b68f0d41cfd01851a14b9a092455f4f 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/fb.h>
 #include <linux/prefetch.h>
+#include <asm/unaligned.h>
 
 #include <drm/drmP.h>
 #include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
                        const u8 *const start = pixel;
                        const uint16_t repeating_pixel_val16 = pixel_val16;
 
-                       *(uint16_t *)cmd = cpu_to_be16(pixel_val16);
+                       put_unaligned_be16(pixel_val16, cmd);
 
                        cmd += 2;
                        pixel += bpp;
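
The udl change replaces a cast-and-store with put_unaligned_be16(), since cmd may sit at an odd address, where a direct 16-bit store faults or invokes undefined behaviour on some architectures. A byte-wise sketch of what the helper guarantees:

    #include <assert.h>
    #include <stdint.h>

    /* Byte-wise big-endian store: valid at any alignment, unlike
     * *(uint16_t *)cmd = cpu_to_be16(...) which assumes alignment. */
    static inline void store_be16(uint8_t *p, uint16_t v)
    {
            p[0] = (uint8_t)(v >> 8);
            p[1] = (uint8_t)(v & 0xff);
    }

    int main(void)
    {
            uint8_t cmd[3] = {0};

            store_be16(cmd + 1, 0x1234);    /* odd offset: still fine */
            assert(cmd[1] == 0x12 && cmd[2] == 0x34);
            return 0;
    }
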
index 63ec1993eaaa905af1583f7169e6be812cf0486d..d162f0dc76e3f44e2134eafa5509137ddf0cc411 100644 (file)
@@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid)
                hid->group = HID_GROUP_WACOM;
                break;
        case USB_VENDOR_ID_SYNAPTICS:
-               if (hid->group == HID_GROUP_GENERIC ||
-                   hid->group == HID_GROUP_MULTITOUCH_WIN_8)
+               if (hid->group == HID_GROUP_GENERIC)
                        if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
                            && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
                                /*
@@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
index 4e2648c86c8c56142cd06b6a269433f72c5c07c9..b26c030926c188aff2dd054cd2ae9e9910837887 100644 (file)
 #define USB_DEVICE_ID_UGEE_TABLET_45           0x0045
 #define USB_DEVICE_ID_YIYNOVA_TABLET           0x004d
 
+#define USB_VENDOR_ID_UGEE             0x28bd
+#define USB_DEVICE_ID_UGEE_TABLET_EX07S                0x0071
+
 #define USB_VENDOR_ID_UNITEC   0x227d
 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709    0x0709
 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19    0x0a19
index 1509d7287ff3e60e79c845214e47e901fae650f8..e3e6e5c893cc05e0c934c8c9f505de9fdd06e26a 100644 (file)
@@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev,
                }
                break;
        case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
+       case USB_DEVICE_ID_UGEE_TABLET_EX07S:
                /* If this is the pen interface */
                if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
                        rc = uclogic_tablet_enable(hdev);
@@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, uclogic_devices);
index 94250c293be2a18b247e2be006a0e7e4faf4f6f8..c68ac65db7ffec361169c326ede5bbf263a00b1b 100644 (file)
@@ -2006,7 +2006,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
                return;
        case HID_DG_TOOLSERIALNUMBER:
                wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
-               wacom_wac->serial[0] |= value;
+               wacom_wac->serial[0] |= (__u32)value;
                return;
        case WACOM_HID_WD_SENSE:
                wacom_wac->hid_data.sense_state = value;
@@ -2176,6 +2176,16 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
                wacom_wac->hid_data.cc_index = field->index;
                wacom_wac->hid_data.cc_value_index = usage->usage_index;
                break;
+       case HID_DG_CONTACTID:
+               if ((field->logical_maximum - field->logical_minimum) < touch_max) {
+                       /*
+                        * The HID descriptor for G11 sensors leaves logical
+                        * maximum set to '1' despite it being a multitouch
+                        * device. Override to a sensible number.
+                        */
+                       field->logical_maximum = 255;
+               }
+               break;
        }
 }
 
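
The (__u32) cast in the TOOLSERIALNUMBER hunk matters because OR-ing a signed 32-bit value into a 64-bit field sign-extends first, smearing ones into the upper half that the mask just cleared. A self-contained demonstration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t serial = 0;
            int value = -2147483647;        /* bit pattern 0x80000001 */

            /* Plain OR sign-extends 'value' to 64 bits first -- the bug
             * the (__u32) cast above fixes. */
            serial = (serial & ~0xFFFFFFFFULL) | (uint64_t)(int64_t)value;
            assert(serial == 0xFFFFFFFF80000001ULL);

            /* Casting to 32-bit unsigned first keeps the high half. */
            serial = 0;
            serial = (serial & ~0xFFFFFFFFULL) | (uint32_t)value;
            assert(serial == 0x80000001ULL);
            return 0;
    }
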
index 91cbe86b25c8ec2d693bea5452c04d0ffa73ba0b..fcbed35e95a824979bb48fba9f05614591e97491 100644 (file)
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
                rx_wr->sg_list = &rx_desc->rx_sg;
                rx_wr->num_sge = 1;
                rx_wr->next = rx_wr + 1;
+               rx_desc->in_use = false;
        }
        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
        struct ib_recv_wr *rx_wr_failed, rx_wr;
        int ret;
 
+       if (!rx_desc->in_use) {
+               /*
+                * if the descriptor is not in-use we already reposted it
+                * for recv, so just silently return
+                */
+               return 0;
+       }
+
+       rx_desc->in_use = false;
        rx_wr.wr_cqe = &rx_desc->rx_cqe;
        rx_wr.sg_list = &rx_desc->rx_sg;
        rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
                return;
        }
 
+       rx_desc->in_use = true;
+
        ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
        ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
        isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 
-       if (ret)
-               transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
-       else
-               isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+       if (ret) {
+               /*
+                * transport_generic_request_failure() expects to hold
+                * two extra references to handle queue-full, so re-add
+                * one here, as target-core will have already dropped
+                * one after the first isert_put_datain() callback.
+                */
+               kref_get(&cmd->cmd_kref);
+               transport_generic_request_failure(cmd, cmd->pi_err);
+       } else {
+               /*
+                * XXX: isert_put_response() failure is not retried.
+                */
+               ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+               if (ret)
+                       pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+       }
 }
 
 static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
        cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
        spin_unlock_bh(&cmd->istate_lock);
 
-       if (ret) {
-               target_put_sess_cmd(se_cmd);
-               transport_send_check_condition_and_sense(se_cmd,
-                                                        se_cmd->pi_err, 0);
-       } else {
+       /*
+        * transport_generic_request_failure() will drop the extra
+        * se_cmd->cmd_kref reference after a T10-PI error, and will
+        * retry after any non-zero ->queue_status() callback error.
+        */
+       if (ret)
+               transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+       else
                target_execute_cmd(se_cmd);
-       }
 }
 
 static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                chain_wr = &isert_cmd->tx_desc.send_wr;
        }
 
-       isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
-       isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
-       return 1;
+       rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+       isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+                 isert_cmd, rc);
+       return rc;
 }
 
 static int
 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+       int ret;
 
        isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
                 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
 
        isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-       isert_rdma_rw_ctx_post(isert_cmd, conn->context,
-                       &isert_cmd->tx_desc.tx_cqe, NULL);
+       ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+                                    &isert_cmd->tx_desc.tx_cqe, NULL);
 
-       isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
-                isert_cmd);
-       return 0;
+       isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+                isert_cmd, ret);
+       return ret;
 }
 
 static int
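
The in_use flag added above makes reposting a receive descriptor idempotent: the completion path marks the descriptor as handed back, and a second repost attempt becomes a silent no-op instead of queueing the same descriptor twice. A stripped-down sketch of the state machine (names hypothetical):

    #include <stdbool.h>

    struct rx_desc_sketch {
            bool in_use;    /* true from completion until reposted */
    };

    /* Completion path: hardware handed the descriptor back to us. */
    static void on_recv_done(struct rx_desc_sketch *d)
    {
            d->in_use = true;
            /* ... process payload, possibly repost immediately ... */
    }

    /* Repost path: a descriptor already reposted elsewhere must not be
     * queued a second time, or receive-queue accounting breaks; the
     * flag makes the operation idempotent, as in isert_post_recv(). */
    static int repost(struct rx_desc_sketch *d)
    {
            if (!d->in_use)
                    return 0;       /* already back on the queue */
            d->in_use = false;
            /* ... ib_post_recv()-equivalent goes here ... */
            return 1;
    }

    int main(void)
    {
            struct rx_desc_sketch d = { .in_use = false };

            on_recv_done(&d);
            return (repost(&d) == 1 && repost(&d) == 0) ? 0 : 1;
    }
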
index c02ada57d7f5c4fa5b765a6401ac8e7bbd6096bb..87d994de8c910d998c12a4205c0e66eb1220b12d 100644 (file)
@@ -60,7 +60,7 @@
 
 #define ISER_RX_PAD_SIZE       (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
                (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-                sizeof(struct ib_cqe)))
+                sizeof(struct ib_cqe) + sizeof(bool)))
 
 #define ISCSI_ISER_SG_TABLESIZE                256
 
@@ -85,6 +85,7 @@ struct iser_rx_desc {
        u64             dma_addr;
        struct ib_sge   rx_sg;
        struct ib_cqe   rx_cqe;
+       bool            in_use;
        char            pad[ISER_RX_PAD_SIZE];
 } __packed;
 
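
Note how ISER_RX_PAD_SIZE must grow its subtracted term by sizeof(bool) in lockstep with the new member; formulas like this silently break when someone adds a field and forgets the pad. A sketch of the same layout plus a compile-time guard (sizes hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    #define TARGET_SIZE 12288   /* hypothetical fixed stride */

    /* The pad must shrink by the size of every member before it. */
    struct rx_desc_sketch {
            uint64_t dma_addr;
            uint32_t sge;
            uint32_t cqe;
            bool     in_use;
            char     pad[TARGET_SIZE -
                         (sizeof(uint64_t) + 2 * sizeof(uint32_t) +
                          sizeof(bool))];
    } __attribute__((packed));

    /* Catch a forgotten pad adjustment at compile time. */
    _Static_assert(sizeof(struct rx_desc_sketch) == TARGET_SIZE,
                   "rx_desc_sketch must keep its fixed size");
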
index 155fcb3b6230a01da6d1de3ab772e508b56fef78..153b1ee13e03eccd764d4a92a85bd579c0f89946 100644 (file)
@@ -202,6 +202,7 @@ static const struct xpad_device {
        { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
        { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+       { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
        { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
        { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
        { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = {
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x146b),            /* BigBen Interactive Controllers */
        XPAD_XBOX360_VENDOR(0x1532),            /* Razer Sabertooth */
+       XPAD_XBOXONE_VENDOR(0x1532),            /* Razer Wildcat */
        XPAD_XBOX360_VENDOR(0x15e4),            /* Numark X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x162e),            /* Joytech X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x1689),            /* Razer Onza */
index efc8ec3423514ad33dd2ebaf0c861d41bb960140..e73d968023f7ce7de418dcf1315b7c554773d604 100644 (file)
@@ -1118,6 +1118,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
@@ -1523,6 +1524,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
                },
        },
+       {
+               /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
+               },
+       },
        {
                /* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
                .matches = {
index 312bd6ca919806f2593e12814e5037469d609c0f..09720d950686c844b49f1d7f32710e160d21624a 100644 (file)
@@ -620,6 +620,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
                },
        },
+       {
+               /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+               },
+       },
        { }
 };
 
index 15af9a9753e582b797a58de3c9713226800999f8..2d203b422129e53554d5be023595c6946877037c 100644 (file)
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
                return -ENOMEM;
        }
 
+       raw_spin_lock_init(&cd->rlock);
+
        cd->gpc_base = of_iomap(node, 0);
        if (!cd->gpc_base) {
                pr_err("fsl-gpcv2: unable to map gpc registers\n");
index e992a7f8a16fc3019016aa1f2844cfbfb437ad97..2b32b88949ba40dfb3901cf9588f670b7aad8441 100644 (file)
@@ -267,7 +267,7 @@ static void sdio_release_func(struct device *dev)
        sdio_free_func_cis(func);
 
        kfree(func->info);
-
+       kfree(func->tmpbuf);
        kfree(func);
 }
 
@@ -282,6 +282,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
        if (!func)
                return ERR_PTR(-ENOMEM);
 
+       /*
+        * allocate buffer separately to make sure it's properly aligned for
+        * DMA usage (incl. 64 bit DMA)
+        */
+       func->tmpbuf = kmalloc(4, GFP_KERNEL);
+       if (!func->tmpbuf) {
+               kfree(func);
+               return ERR_PTR(-ENOMEM);
+       }
+
        func->card = card;
 
        device_initialize(&func->dev);
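
The sdio fix allocates tmpbuf separately because kmalloc() memory carries the architecture's minimum DMA alignment guarantee, while an array embedded in the struct could be misaligned and share a cache line with unrelated members. A module-style sketch of the alloc/free pairing (struct hypothetical):

    #include <linux/slab.h>
    #include <linux/types.h>

    struct func_sketch {
            u8 *tmpbuf;     /* DMA target: lives in its own allocation */
            int state;      /* must not share a DMA'd cache line */
    };

    static struct func_sketch *func_sketch_alloc(void)
    {
            struct func_sketch *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return NULL;

            /* kmalloc() memory is aligned suitably for DMA; an array
             * embedded in 'f' would not carry that guarantee. */
            f->tmpbuf = kmalloc(4, GFP_KERNEL);
            if (!f->tmpbuf) {
                    kfree(f);
                    return NULL;
            }
            return f;
    }

    static void func_sketch_free(struct func_sketch *f)
    {
            kfree(f->tmpbuf);   /* pairs with the kfree added in release */
            kfree(f);
    }
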
index a9ac0b4573131f48cad46044e018b5de479cf695..8718432751c50c6d5a955b104c250b151430d19c 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
@@ -1621,10 +1622,16 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
 
                if (card->type == MMC_TYPE_SDIO ||
                    card->type == MMC_TYPE_SD_COMBO) {
-                       set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+                               pm_runtime_get_noresume(mmc->parent);
+                               set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       }
                        clk_en_a = clk_en_a_old & ~clken_low_pwr;
                } else {
-                       clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+                               pm_runtime_put_noidle(mmc->parent);
+                               clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       }
                        clk_en_a = clk_en_a_old | clken_low_pwr;
                }
 
index 7123ef96ed18523c88553146035103f3517bd372..445fc47dc3e77e39b17e7531eb7dbed58ddd3279 100644 (file)
@@ -830,6 +830,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
 
        switch (uhs) {
        case MMC_TIMING_UHS_SDR50:
+       case MMC_TIMING_UHS_DDR50:
                pinctrl = imx_data->pins_100mhz;
                break;
        case MMC_TIMING_UHS_SDR104:
index 0134ba32a05784b65d1a0e6d470eee7a857df74c..39712560b4c1b55aa847f0ac69f6815edb11e678 100644 (file)
@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
                        return err;
        }
 
-       if (bytes == 0) {
-               err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
-               if (err)
-                       return err;
+       err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+       if (err)
+               return err;
 
+       if (bytes == 0) {
                err = clear_update_marker(ubi, vol, 0);
                if (err)
                        return err;
index 8a4ba8b88e52f9d5b1ba318e5dbfb53344f6ebca..34481c9be1d192137e2dc7e3c8475184dfa3bec0 100644 (file)
@@ -1104,11 +1104,11 @@ static void bond_compute_features(struct bonding *bond)
                gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
                gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
        }
+       bond_dev->hard_header_len = max_hard_header_len;
 
 done:
        bond_dev->vlan_features = vlan_features;
        bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
-       bond_dev->hard_header_len = max_hard_header_len;
        bond_dev->gso_max_segs = gso_max_segs;
        netif_set_gso_max_size(bond_dev, gso_max_size);
 
index 138f5ae75c0bc6fcf912d30975197caedb0e0b3e..4d1fe8d9504234f436a0b9dd739b41089f563225 100644 (file)
@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
        int work_done = 0;
 
        u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
-       u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+       u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
        u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
 
        /* Handle bus state changes */
index caed4e6960f8c77fdca254f5ed4e7ccbe69792fc..11662f479e760ba77f613c90bfc8026b005da3ea 100644 (file)
@@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev)
 
        devm_can_led_init(ndev);
 
-       dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
-                priv->regs, ndev->irq);
+       dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
 
        return 0;
 fail_candev:
index 8483a40e7e9ef52327e15e03fa4100c73f68a53f..5f9e0e6301d06ecbe466e2f4eb4e8b7fee8ece27 100644 (file)
@@ -72,6 +72,8 @@ config CAN_PEAK_USB
          PCAN-USB Pro         dual CAN 2.0b channels USB adapter
          PCAN-USB FD          single CAN-FD channel USB adapter
          PCAN-USB Pro FD      dual CAN-FD channels USB adapter
+         PCAN-Chip USB        CAN-FD to USB stamp module
+         PCAN-USB X6          6 CAN-FD channels USB adapter
 
          (see also http://www.peak-system.com).
 
index 300349fe8dc04945d956ec0dd17a470aa7ddb426..eecee7f8dfb70763aa0e7c3f90f85ae66ff85385 100644 (file)
@@ -739,13 +739,18 @@ static const struct net_device_ops gs_usb_netdev_ops = {
 static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 {
        struct gs_can *dev = netdev_priv(netdev);
-       struct gs_identify_mode imode;
+       struct gs_identify_mode *imode;
        int rc;
 
+       imode = kmalloc(sizeof(*imode), GFP_KERNEL);
+
+       if (!imode)
+               return -ENOMEM;
+
        if (do_identify)
-               imode.mode = GS_CAN_IDENTIFY_ON;
+               imode->mode = GS_CAN_IDENTIFY_ON;
        else
-               imode.mode = GS_CAN_IDENTIFY_OFF;
+               imode->mode = GS_CAN_IDENTIFY_OFF;
 
        rc = usb_control_msg(interface_to_usbdev(dev->iface),
                             usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@ -755,10 +760,12 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
                             USB_RECIP_INTERFACE,
                             dev->channel,
                             0,
-                            &imode,
-                            sizeof(imode),
+                            imode,
+                            sizeof(*imode),
                             100);
 
+       kfree(imode);
+
        return (rc > 0) ? 0 : rc;
 }
 
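
Same class of bug in gs_usb: usb_control_msg() buffers are DMA-mapped by the USB core, so the data stage must come from the heap, never from the stack where the old struct gs_identify_mode lived. A minimal module-style sketch (request number hypothetical):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/usb.h>

    /* A stack variable is not a valid DMA target for the data stage. */
    static int send_mode(struct usb_device *udev, u8 mode_val)
    {
            u8 *buf;
            int rc;

            buf = kmalloc(1, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            *buf = mode_val;

            rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x01 /* hypothetical request */,
                                 USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                                 0, 0, buf, 1, 100);

            kfree(buf);
            return rc > 0 ? 0 : rc;
    }
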
index 0b0302af3bd2dc3da893ed120afa028d21b219da..57913dbbae0a970f5f28051d10f064792d69333c 100644 (file)
@@ -39,6 +39,7 @@ static struct usb_device_id peak_usb_table[] = {
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
+       {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID)},
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
        {} /* Terminating entry */
 };
@@ -51,6 +52,7 @@ static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
        &pcan_usb_pro,
        &pcan_usb_fd,
        &pcan_usb_pro_fd,
+       &pcan_usb_chip,
        &pcan_usb_x6,
 };
 
index 3cbfb069893d5ce1d2da16969426433240ad4ce6..c01316cac354b364a422b3312efa7756771223b8 100644 (file)
@@ -27,6 +27,7 @@
 #define PCAN_USBPRO_PRODUCT_ID         0x000d
 #define PCAN_USBPROFD_PRODUCT_ID       0x0011
 #define PCAN_USBFD_PRODUCT_ID          0x0012
+#define PCAN_USBCHIP_PRODUCT_ID                0x0013
 #define PCAN_USBX6_PRODUCT_ID          0x0014
 
 #define PCAN_USB_DRIVER_NAME           "peak_usb"
@@ -90,6 +91,7 @@ struct peak_usb_adapter {
 extern const struct peak_usb_adapter pcan_usb;
 extern const struct peak_usb_adapter pcan_usb_pro;
 extern const struct peak_usb_adapter pcan_usb_fd;
+extern const struct peak_usb_adapter pcan_usb_chip;
 extern const struct peak_usb_adapter pcan_usb_pro_fd;
 extern const struct peak_usb_adapter pcan_usb_x6;
 
index 304732550f0a628a7fa9ae9c94e67113d9a869a0..528d3bb4917f1ecb5caf2ef67a71ae8adad51c44 100644 (file)
@@ -1061,6 +1061,78 @@ const struct peak_usb_adapter pcan_usb_fd = {
        .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
 };
 
+/* describes the PCAN-CHIP USB */
+static const struct can_bittiming_const pcan_usb_chip_const = {
+       .name = "pcan_chip_usb",
+       .tseg1_min = 1,
+       .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+       .tseg2_min = 1,
+       .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+       .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+       .brp_min = 1,
+       .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_chip_data_const = {
+       .name = "pcan_chip_usb",
+       .tseg1_min = 1,
+       .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
+       .tseg2_min = 1,
+       .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+       .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
+       .brp_min = 1,
+       .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
+       .brp_inc = 1,
+};
+
+const struct peak_usb_adapter pcan_usb_chip = {
+       .name = "PCAN-Chip USB",
+       .device_id = PCAN_USBCHIP_PRODUCT_ID,
+       .ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
+       .ctrlmode_supported = CAN_CTRLMODE_FD |
+               CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+       .clock = {
+               .freq = PCAN_UFD_CRYSTAL_HZ,
+       },
+       .bittiming_const = &pcan_usb_chip_const,
+       .data_bittiming_const = &pcan_usb_chip_data_const,
+
+       /* size of device private data */
+       .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+       /* timestamps usage */
+       .ts_used_bits = 32,
+       .ts_period = 1000000, /* calibration period in ts. */
+       .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+       .us_per_ts_shift = 0,
+
+       /* give here messages in/out endpoints */
+       .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+       .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0},
+
+       /* size of rx/tx usb buffers */
+       .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+       .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+       /* device callbacks */
+       .intf_probe = pcan_usb_pro_probe,       /* same as PCAN-USB Pro */
+       .dev_init = pcan_usb_fd_init,
+
+       .dev_exit = pcan_usb_fd_exit,
+       .dev_free = pcan_usb_fd_free,
+       .dev_set_bus = pcan_usb_fd_set_bus,
+       .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+       .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+       .dev_decode_buf = pcan_usb_fd_decode_buf,
+       .dev_start = pcan_usb_fd_start,
+       .dev_stop = pcan_usb_fd_stop,
+       .dev_restart_async = pcan_usb_fd_restart_async,
+       .dev_encode_msg = pcan_usb_fd_encode_msg,
+
+       .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
+
 /* describes the PCAN-USB Pro FD adapter */
 static const struct can_bittiming_const pcan_usb_pro_fd_const = {
        .name = "pcan_usb_pro_fd",
index 8cf4801994e883be64010934a0413cfbdb86ed16..fa0eece21eef9825e716dd7744c556211a0b41c3 100644 (file)
@@ -326,6 +326,7 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
 
 static void b53_set_forwarding(struct b53_device *dev, int enable)
 {
+       struct dsa_switch *ds = dev->ds;
        u8 mgmt;
 
        b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -336,6 +337,15 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
                mgmt &= ~SM_SW_FWD_EN;
 
        b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+       /* Include the IMP port in dumb forwarding mode when no tagging
+        * protocol is set.
+        */
+       if (ds->ops->get_tag_protocol(ds) == DSA_TAG_PROTO_NONE) {
+               b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+               mgmt |= B53_MII_DUMB_FWDG_EN;
+               b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+       }
 }
 
 static void b53_enable_vlan(struct b53_device *dev, bool enable)
@@ -598,7 +608,8 @@ static void b53_switch_reset_gpio(struct b53_device *dev)
 
 static int b53_switch_reset(struct b53_device *dev)
 {
-       u8 mgmt;
+       unsigned int timeout = 1000;
+       u8 mgmt, reg;
 
        b53_switch_reset_gpio(dev);
 
@@ -607,6 +618,28 @@ static int b53_switch_reset(struct b53_device *dev)
                b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
        }
 
+       /* This is specific to 58xx devices; do not use is58xx(), which
+        * covers the larger Starfighter 2 family, including 7445/7278.
+        * Those still use this driver as a library and need to perform
+        * the reset earlier.
+        */
+       if (dev->chip_id == BCM58XX_DEVICE_ID) {
+               b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+               reg |= SW_RST | EN_SW_RST | EN_CH_RST;
+               b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
+
+               do {
+                       b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+                       if (!(reg & SW_RST))
+                               break;
+
+                       usleep_range(1000, 2000);
+               } while (timeout-- > 0);
+
+               if (timeout == 0)
+                       return -ETIMEDOUT;
+       }
+
        b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
 
        if (!(mgmt & SM_SW_FWD_EN)) {
@@ -1731,7 +1764,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
                .vlans  = 4096,
                .enabled_ports = 0x1ff,
                .arl_entries = 4,
-               .cpu_port = B53_CPU_PORT_25,
+               .cpu_port = B53_CPU_PORT,
                .vta_regs = B53_VTA_REGS,
                .duplex_reg = B53_DUPLEX_STAT_GE,
                .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
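
The 58xx reset path above is the classic kick-then-poll loop with a bounded budget. One pitfall worth noting with such loops: a post-decremented unsigned counter wraps past zero on exit, so testing it after the loop is fragile; the sketch below returns from inside the loop instead (register model simulated so it runs standalone):

    #include <stdio.h>

    #define SW_RST (1u << 7)

    /* Simulated self-clearing reset bit: stays set for a few polls,
     * then clears, standing in for the real bus accessors. */
    static unsigned int polls_left = 3;

    static unsigned char read_reg(void)
    {
            return polls_left && polls_left-- ? SW_RST : 0;
    }

    /* Poll until the hardware clears the bit or the budget runs out. */
    static int wait_reset_done(void)
    {
            unsigned int timeout = 1000;

            while (timeout--) {
                    if (!(read_reg() & SW_RST))
                            return 0;       /* reset finished */
                    /* usleep_range(1000, 2000) in the kernel version */
            }
            return -1;                      /* -ETIMEDOUT upstream */
    }

    int main(void)
    {
            printf("reset %s\n", wait_reset_done() ? "timed out" : "done");
            return 0;
    }
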
index 9fd24c418fa4256e8517d5dec1dfa97681ba72db..e5c86d44667af1fd9f68a550f169a322989f39bd 100644 (file)
 #define  B53_UC_FWD_EN                 BIT(6)
 #define  B53_MC_FWD_EN                 BIT(7)
 
+/* Switch control (8 bit) */
+#define B53_SWITCH_CTRL                        0x22
+#define  B53_MII_DUMB_FWDG_EN          BIT(6)
+
 /* (16 bit) */
 #define B53_UC_FLOOD_MASK              0x32
 #define B53_MC_FLOOD_MASK              0x34
 /* Software reset register (8 bit) */
 #define B53_SOFTRESET                  0x79
 #define   SW_RST                       BIT(7)
+#define   EN_CH_RST                    BIT(6)
 #define   EN_SW_RST                    BIT(4)
 
 /* Fast Aging Control register (8 bit) */
index 64a1095e4d1495c1e32c3ff7008882789f6b6f6e..a0ca68ce3fbb164ea6e6a2c75a36a8c1ae54172d 100644 (file)
@@ -134,6 +134,7 @@ static void set_max_bgx_per_node(struct pci_dev *pdev)
        pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
        switch (sdevid) {
        case PCI_SUBSYS_DEVID_81XX_BGX:
+       case PCI_SUBSYS_DEVID_81XX_RGX:
                max_bgx_per_node = MAX_BGX_PER_CN81XX;
                break;
        case PCI_SUBSYS_DEVID_83XX_BGX:
index c5080f2cead5d0efc435fd827038eb7dbe4b5830..6b7fe6fdd13b9b27b4a1573e8c7b3869b17bcc19 100644 (file)
@@ -16,6 +16,7 @@
 /* Subsystem device IDs */
 #define PCI_SUBSYS_DEVID_88XX_BGX              0xA126
 #define PCI_SUBSYS_DEVID_81XX_BGX              0xA226
+#define PCI_SUBSYS_DEVID_81XX_RGX              0xA254
 #define PCI_SUBSYS_DEVID_83XX_BGX              0xA326
 
 #define    MAX_BGX_THUNDER                     8 /* Max 2 nodes, 4 per node */
index 9e757684816d48b903f62cdac2d6a1123e6c3305..93949139e62cf92a7e593b36d6a7399edbd1b6e6 100644 (file)
@@ -613,7 +613,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        struct mtk_tx_dma *itxd, *txd;
-       struct mtk_tx_buf *tx_buf;
+       struct mtk_tx_buf *itx_buf, *tx_buf;
        dma_addr_t mapped_addr;
        unsigned int nr_frags;
        int i, n_desc = 1;
@@ -627,8 +627,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
        txd4 |= fport;
 
-       tx_buf = mtk_desc_to_tx_buf(ring, itxd);
-       memset(tx_buf, 0, sizeof(*tx_buf));
+       itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+       memset(itx_buf, 0, sizeof(*itx_buf));
 
        if (gso)
                txd4 |= TX_DMA_TSO;
@@ -647,9 +647,11 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                return -ENOMEM;
 
        WRITE_ONCE(itxd->txd1, mapped_addr);
-       tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-       dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
+       itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+       itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+                         MTK_TX_FLAGS_FPORT1;
+       dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
+       dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
 
        /* TX SG offload */
        txd = itxd;
@@ -685,11 +687,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                                               last_frag * TX_DMA_LS0));
                        WRITE_ONCE(txd->txd4, fport);
 
-                       tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf = mtk_desc_to_tx_buf(ring, txd);
                        memset(tx_buf, 0, sizeof(*tx_buf));
-
+                       tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+                       tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+                                        MTK_TX_FLAGS_FPORT1;
+
                        dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
                        dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
                        frag_size -= frag_map_size;
@@ -698,7 +702,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        }
 
        /* store skb to cleanup */
-       tx_buf->skb = skb;
+       itx_buf->skb = skb;
 
        WRITE_ONCE(itxd->txd4, txd4);
        WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
@@ -1012,17 +1016,16 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 
        while ((cpu != dma) && budget) {
                u32 next_cpu = desc->txd2;
-               int mac;
+               int mac = 0;
 
                desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
                if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
                        break;
 
-               mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
-                      TX_DMA_FPORT_MASK;
-               mac--;
-
                tx_buf = mtk_desc_to_tx_buf(ring, desc);
+               if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
+                       mac = 1;
+
                skb = tx_buf->skb;
                if (!skb) {
                        condition = 1;
index 99b1c8e9f16f981a0603f906280dcd98f7fa1b54..08285a96ff7077f83b5ac530e8f59266922819d1 100644 (file)
@@ -406,12 +406,18 @@ struct mtk_hw_stats {
        struct u64_stats_sync   syncp;
 };
 
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
 enum mtk_tx_flags {
+       /* PDMA descriptor can point at 1-2 segments. This enum allows us to
+        * track how memory was allocated so that it can be freed properly.
+        */
        MTK_TX_FLAGS_SINGLE0    = 0x01,
        MTK_TX_FLAGS_PAGE0      = 0x02,
+
+       /* MTK_TX_FLAGS_FPORTx tracks which port the SKB was transmitted
+        * out of, instead of looking it up via the hardware TX descriptor.
+        */
+       MTK_TX_FLAGS_FPORT0     = 0x04,
+       MTK_TX_FLAGS_FPORT1     = 0x08,
 };
 
 /* This enum allows us to identify how the clock is defined on the array of the
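
The FPORT flags above trade a descriptor read for two bits of software state: the transmit path records the egress MAC in the per-buffer flags, and the completion path recovers it from there. A small sketch of that bookkeeping (plain C; the names are illustrative, not driver APIs):

    /* Record the port on map, read it back on completion, never
     * touching the DMA descriptor again.
     */
    #include <assert.h>
    #include <stdio.h>

    enum tx_flags {
            TX_FLAGS_SINGLE0 = 0x01,
            TX_FLAGS_PAGE0   = 0x02,
            TX_FLAGS_FPORT0  = 0x04,
            TX_FLAGS_FPORT1  = 0x08,
    };

    struct tx_buf { unsigned int flags; };

    static void map_buf(struct tx_buf *b, int mac_id)
    {
            b->flags |= mac_id ? TX_FLAGS_FPORT1 : TX_FLAGS_FPORT0;
    }

    static int complete_buf(const struct tx_buf *b)
    {
            return (b->flags & TX_FLAGS_FPORT1) ? 1 : 0;
    }

    int main(void)
    {
            struct tx_buf b = { 0 };

            map_buf(&b, 1);
            assert(complete_buf(&b) == 1);
            printf("completed on mac %d\n", complete_buf(&b));
            return 0;
    }
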
index dc52053128bc752ccd398449330c24c0bdf8b3a1..3d9490cd2db19720d2b06f90d4cd322a3c87f4b0 100644 (file)
@@ -90,7 +90,7 @@
 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
 
 #define MLX5_UMR_ALIGN                         (2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD      (128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD      (256)
 
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT                       32
index d55fff0ba388f746809ac601fc3863e94309fc12..26fc77e80f7b38d45e52911233d5678da6334eb1 100644 (file)
@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *i
        int idx = 0;
        int err = 0;
 
+       info->data = MAX_NUM_OF_ETHTOOL_RULES;
        while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
                err = mlx5e_ethtool_get_flow(priv, info, location);
                if (!err)
index 66c133757a5ee8daae122e93322306b1c5c44336..15cc7b469d2ed9c066ee11a94a17a8ea5c9709f5 100644 (file)
@@ -174,7 +174,7 @@ unlock:
 
 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
-       struct mlx5e_sw_stats *s = &priv->stats.sw;
+       struct mlx5e_sw_stats temp, *s = &temp;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u64 tx_offload_none = 0;
@@ -229,6 +229,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                priv->stats.pport.phy_counters,
                                counter_set.phys_layer_cntrs.link_down_events);
+       memcpy(&priv->stats.sw, s, sizeof(*s));
 }
 
 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
@@ -243,7 +244,6 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);
 
-       memset(out, 0, outlen);
        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
 }
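
The mlx5e_update_sw_counters() change above follows a snapshot-then-publish pattern: totals are accumulated in a stack temporary and copied over the shared struct in one memcpy, so concurrent readers never see the half-rebuilt counters that the old zero-then-accumulate left visible. A sketch of the shape, under the assumption that readers tolerate a torn but never partially zeroed view:

    #include <stdio.h>
    #include <string.h>

    struct sw_stats { unsigned long rx_packets, tx_packets; };

    static struct sw_stats shared;      /* read by other contexts */

    static void update_sw_counters(const struct sw_stats *rings, int n)
    {
            struct sw_stats temp = { 0 };

            for (int i = 0; i < n; i++) {
                    temp.rx_packets += rings[i].rx_packets;
                    temp.tx_packets += rings[i].tx_packets;
            }
            memcpy(&shared, &temp, sizeof(shared)); /* single publish */
    }

    int main(void)
    {
            struct sw_stats rings[2] = { { 3, 1 }, { 4, 2 } };

            update_sw_counters(rings, 2);
            printf("rx=%lu tx=%lu\n", shared.rx_packets, shared.tx_packets);
            return 0;
    }
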
 
index fade7233dac5256cb69d0fc67b0c3cb1a4444da5..5436866798f447eef43dac9af242decae4ed6017 100644 (file)
@@ -639,7 +639,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 
        if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
            rep->vport != FDB_UPLINK_VPORT) {
-               if (min_inline > esw->offloads.inline_mode) {
+               if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+                   esw->offloads.inline_mode < min_inline) {
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    min_inline, esw->offloads.inline_mode);
@@ -785,16 +786,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
        return 0;
 }
 
-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
-                                char buf[],
-                                unsigned char h_dest[ETH_ALEN],
-                                int ttl,
-                                __be32 daddr,
-                                __be32 saddr,
-                                __be16 udp_dst_port,
-                                __be32 vx_vni)
+static void gen_vxlan_header_ipv4(struct net_device *out_dev,
+                                 char buf[], int encap_size,
+                                 unsigned char h_dest[ETH_ALEN],
+                                 int ttl,
+                                 __be32 daddr,
+                                 __be32 saddr,
+                                 __be16 udp_dst_port,
+                                 __be32 vx_vni)
 {
-       int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
        struct ethhdr *eth = (struct ethhdr *)buf;
        struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
        struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
@@ -817,20 +817,17 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
        udp->dest = udp_dst_port;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-       return encap_size;
 }
 
-static int gen_vxlan_header_ipv6(struct net_device *out_dev,
-                                char buf[],
-                                unsigned char h_dest[ETH_ALEN],
-                                int ttl,
-                                struct in6_addr *daddr,
-                                struct in6_addr *saddr,
-                                __be16 udp_dst_port,
-                                __be32 vx_vni)
+static void gen_vxlan_header_ipv6(struct net_device *out_dev,
+                                 char buf[], int encap_size,
+                                 unsigned char h_dest[ETH_ALEN],
+                                 int ttl,
+                                 struct in6_addr *daddr,
+                                 struct in6_addr *saddr,
+                                 __be16 udp_dst_port,
+                                 __be32 vx_vni)
 {
-       int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
        struct ethhdr *eth = (struct ethhdr *)buf;
        struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
        struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
@@ -852,8 +849,6 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev,
        udp->dest = udp_dst_port;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-       return encap_size;
 }
 
 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
@@ -862,13 +857,20 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                                          struct net_device **out_dev)
 {
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+       int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
-       int encap_size, ttl, err;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        char *encap_header;
+       int ttl, err;
+
+       if (max_encap_size < ipv4_encap_size) {
+               mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+                              ipv4_encap_size, max_encap_size);
+               return -EOPNOTSUPP;
+       }
 
-       encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+       encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
        if (!encap_header)
                return -ENOMEM;
 
@@ -903,11 +905,11 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 
        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
-               encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
-                                                  e->h_dest, ttl,
-                                                  fl4.daddr,
-                                                  fl4.saddr, tun_key->tp_dst,
-                                                  tunnel_id_to_key32(tun_key->tun_id));
+               gen_vxlan_header_ipv4(*out_dev, encap_header,
+                                     ipv4_encap_size, e->h_dest, ttl,
+                                     fl4.daddr,
+                                     fl4.saddr, tun_key->tp_dst,
+                                     tunnel_id_to_key32(tun_key->tun_id));
                break;
        default:
                err = -EOPNOTSUPP;
@@ -915,7 +917,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        }
 
        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              encap_size, encap_header, &e->encap_id);
+                              ipv4_encap_size, encap_header, &e->encap_id);
 out:
        if (err && n)
                neigh_release(n);
@@ -930,13 +932,20 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
 {
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+       int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
-       int encap_size, err, ttl = 0;
        struct neighbour *n = NULL;
        struct flowi6 fl6 = {};
        char *encap_header;
+       int err, ttl = 0;
+
+       if (max_encap_size < ipv6_encap_size) {
+               mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+                              ipv6_encap_size, max_encap_size);
+               return -EOPNOTSUPP;
+       }
 
-       encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+       encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
        if (!encap_header)
                return -ENOMEM;
 
@@ -972,11 +981,11 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
-               encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
-                                                  e->h_dest, ttl,
-                                                  &fl6.daddr,
-                                                  &fl6.saddr, tun_key->tp_dst,
-                                                  tunnel_id_to_key32(tun_key->tun_id));
+               gen_vxlan_header_ipv6(*out_dev, encap_header,
+                                     ipv6_encap_size, e->h_dest, ttl,
+                                     &fl6.daddr,
+                                     &fl6.saddr, tun_key->tp_dst,
+                                     tunnel_id_to_key32(tun_key->tun_id));
                break;
        default:
                err = -EOPNOTSUPP;
@@ -984,7 +993,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
        }
 
        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              encap_size, encap_header, &e->encap_id);
+                              ipv6_encap_size, encap_header, &e->encap_id);
 out:
        if (err && n)
                neigh_release(n);
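
The ipv4_encap_size/ipv6_encap_size values above are just the sums of the fixed headers (Ethernet + IP + UDP + VXLAN); checking them against max_encap_header_size before allocation rejects unsupported devices early and lets kzalloc() take the exact size instead of the device maximum. A worked computation of those sums (plain C; the 64-byte cap is a made-up capability value):

    #include <stdio.h>

    #define ETH_HLEN   14
    #define IPV4_HLEN  20
    #define IPV6_HLEN  40
    #define VXLAN_HLEN (8 + 8)          /* UDP header + VXLAN header */

    int main(void)
    {
            int max_encap_size = 64;    /* pretend device capability */
            int v4 = ETH_HLEN + IPV4_HLEN + VXLAN_HLEN;  /* 50 */
            int v6 = ETH_HLEN + IPV6_HLEN + VXLAN_HLEN;  /* 70 */

            printf("ipv4 encap %d, ipv6 encap %d\n", v4, v6);
            if (max_encap_size < v6)
                    printf("ipv6 encap too big, max supported is %d\n",
                           max_encap_size);
            return 0;
    }
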
index 307ec6c5fd3b62dffe6cfd5f2541dad9fc155fc3..d111cebca9f1ea57d57a70b108a436af1adcc6aa 100644 (file)
@@ -911,8 +911,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int num_vports = esw->enabled_vports;
-       int err;
-       int vport;
+       int err, vport;
        u8 mlx5_mode;
 
        if (!MLX5_CAP_GEN(dev, vport_group_manager))
@@ -921,9 +920,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-           MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+       switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+       case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+               if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
+                       return 0;
+               /* fall through */
+       case MLX5_CAP_INLINE_MODE_L2:
+               esw_warn(dev, "Inline mode can't be set\n");
                return -EOPNOTSUPP;
+       case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+               break;
+       }
 
        if (esw->offloads.num_flows > 0) {
                esw_warn(dev, "Can't set inline mode when flows are configured\n");
@@ -966,18 +973,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-           MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-               return -EOPNOTSUPP;
-
        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
 
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 {
+       u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
        struct mlx5_core_dev *dev = esw->dev;
        int vport;
-       u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 
        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;
@@ -985,10 +988,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
 
-       if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-           MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-               return -EOPNOTSUPP;
+       switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+       case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+               mlx5_mode = MLX5_INLINE_MODE_NONE;
+               goto out;
+       case MLX5_CAP_INLINE_MODE_L2:
+               mlx5_mode = MLX5_INLINE_MODE_L2;
+               goto out;
+       case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+               goto query_vports;
+       }
 
+query_vports:
        for (vport = 1; vport <= nvfs; vport++) {
                mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
                if (vport > 1 && prev_mlx5_mode != mlx5_mode)
@@ -996,6 +1007,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
                prev_mlx5_mode = mlx5_mode;
        }
 
+out:
        *mode = mlx5_mode;
        return 0;
 }
index 60154a175bd3866f2b461a357c60d86283afde12..0ad66324247f71c212f44f98679c80f05a2650d3 100644 (file)
@@ -1029,7 +1029,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        if (err) {
                dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
                        FW_INIT_TIMEOUT_MILI);
-               goto out_err;
+               goto err_cmd_cleanup;
        }
 
        err = mlx5_core_enable_hca(dev, 0);
index 2e6b0f290ddc2cbf3beeb2f5c1fe813378691c69..222b25908d012614639b4e9ef2a8a7727a82cd18 100644 (file)
@@ -87,6 +87,7 @@ static void up_rel_func(struct kref *kref)
        struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
 
        list_del(&up->list);
+       iounmap(up->map);
        if (mlx5_cmd_free_uar(up->mdev, up->index))
                mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
        kfree(up->reg_bitmap);
index 5bd36a4a8fcdfd201b40321c7fefb82776cd347d..cfdadb658ade0fe73551f6f822ec8ae1805f83de 100644 (file)
        ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
 
 static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
-       {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+       {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI},
+       {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE},
+       {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE},
+       {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE},
+       {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH},
 };
 
 static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
@@ -583,6 +583,13 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
                   p_params->ets_cbs,
                   p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
 
+       if (p_params->ets_enabled && !p_params->max_ets_tc) {
+               p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
+               DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+                          "ETS params: max_ets_tc is forced to %d\n",
+                          p_params->max_ets_tc);
+       }
+
        /* 8 bit tsa and bw data corresponding to each of the 8 TC's are
         * encoded in a type u32 array of size 2.
         */
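
The comment above describes eight one-byte per-TC fields packed into a u32[2] array. A sketch of unpacking them (plain C; the little-endian byte order within each u32 is an assumption made for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t tbl[2] = { 0x03020100, 0x07060504 };
            uint8_t tc[8];

            /* byte i lives in word i/4, lane i%4 */
            for (int i = 0; i < 8; i++)
                    tc[i] = (tbl[i / 4] >> ((i % 4) * 8)) & 0xff;

            for (int i = 0; i < 8; i++)
                    printf("TC%d = %u\n", i, tc[i]);
            return 0;
    }
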
@@ -1001,6 +1008,8 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
        u8 pfc_map = 0;
        int i;
 
+       *pfc &= ~DCBX_PFC_ERROR_MASK;
+
        if (p_params->pfc.willing)
                *pfc |= DCBX_PFC_WILLING_MASK;
        else
@@ -1255,7 +1264,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 {
        struct qed_dcbx_get *dcbx_info;
 
-       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
        if (!dcbx_info)
                return NULL;
 
@@ -2073,6 +2082,8 @@ static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
                dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
 
+       dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap;
+
        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EINVAL;
index 8cfc4a54f2dc69240ae1fc42195ef854e0b1c2c9..3cd7989c007dfe46947e2ddb366a904f1af90198 100644 (file)
@@ -1516,11 +1516,12 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                spin_unlock_irqrestore(&priv->lock, flags);
                return NETDEV_TX_BUSY;
        }
-       entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-       priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
        if (skb_put_padto(skb, ETH_ZLEN))
-               goto drop;
+               goto exit;
+
+       entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+       priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
        buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
                 entry / NUM_TX_DESC * DPTR_ALIGN;
index 54248775f227b062addf85044f486ad4512039f5..f68c4db656eda84691b411cee940528a01a2bb62 100644 (file)
@@ -1127,12 +1127,70 @@ static struct mdiobb_ops bb_ops = {
        .get_mdio_data = sh_get_mdio,
 };
 
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       struct sh_eth_txdesc *txdesc;
+       int free_num = 0;
+       int entry;
+       bool sent;
+
+       for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+               entry = mdp->dirty_tx % mdp->num_tx_ring;
+               txdesc = &mdp->tx_ring[entry];
+               sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+               if (sent_only && !sent)
+                       break;
+               /* TACT bit must be checked before all the following reads */
+               dma_rmb();
+               netif_info(mdp, tx_done, ndev,
+                          "tx entry %d status 0x%08x\n",
+                          entry, le32_to_cpu(txdesc->status));
+               /* Free the original skb. */
+               if (mdp->tx_skbuff[entry]) {
+                       dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+                                        le32_to_cpu(txdesc->len) >> 16,
+                                        DMA_TO_DEVICE);
+                       dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+                       mdp->tx_skbuff[entry] = NULL;
+                       free_num++;
+               }
+               txdesc->status = cpu_to_le32(TD_TFP);
+               if (entry >= mdp->num_tx_ring - 1)
+                       txdesc->status |= cpu_to_le32(TD_TDLE);
+
+               if (sent) {
+                       ndev->stats.tx_packets++;
+                       ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+               }
+       }
+       return free_num;
+}
+
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ringsize, i;
 
+       if (mdp->rx_ring) {
+               for (i = 0; i < mdp->num_rx_ring; i++) {
+                       if (mdp->rx_skbuff[i]) {
+                               struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+                               dma_unmap_single(&ndev->dev,
+                                                le32_to_cpu(rxdesc->addr),
+                                                ALIGN(mdp->rx_buf_sz, 32),
+                                                DMA_FROM_DEVICE);
+                       }
+               }
+               ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+               dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+                                 mdp->rx_desc_dma);
+               mdp->rx_ring = NULL;
+       }
+
        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1141,27 +1199,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;
 
-       /* Free Tx skb ringbuffer */
-       if (mdp->tx_skbuff) {
-               for (i = 0; i < mdp->num_tx_ring; i++)
-                       dev_kfree_skb(mdp->tx_skbuff[i]);
-       }
-       kfree(mdp->tx_skbuff);
-       mdp->tx_skbuff = NULL;
-
-       if (mdp->rx_ring) {
-               ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-               dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-                                 mdp->rx_desc_dma);
-               mdp->rx_ring = NULL;
-       }
-
        if (mdp->tx_ring) {
+               sh_eth_tx_free(ndev, false);
+
                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
                dma_free_coherent(NULL, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }
+
+       /* Free Tx skb ringbuffer */
+       kfree(mdp->tx_skbuff);
+       mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
@@ -1409,43 +1458,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
        update_mac_address(ndev);
 }
 
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       struct sh_eth_txdesc *txdesc;
-       int free_num = 0;
-       int entry;
-
-       for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-               entry = mdp->dirty_tx % mdp->num_tx_ring;
-               txdesc = &mdp->tx_ring[entry];
-               if (txdesc->status & cpu_to_le32(TD_TACT))
-                       break;
-               /* TACT bit must be checked before all the following reads */
-               dma_rmb();
-               netif_info(mdp, tx_done, ndev,
-                          "tx entry %d status 0x%08x\n",
-                          entry, le32_to_cpu(txdesc->status));
-               /* Free the original skb. */
-               if (mdp->tx_skbuff[entry]) {
-                       dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
-                                        le32_to_cpu(txdesc->len) >> 16,
-                                        DMA_TO_DEVICE);
-                       dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
-                       mdp->tx_skbuff[entry] = NULL;
-                       free_num++;
-               }
-               txdesc->status = cpu_to_le32(TD_TFP);
-               if (entry >= mdp->num_tx_ring - 1)
-                       txdesc->status |= cpu_to_le32(TD_TDLE);
-
-               ndev->stats.tx_packets++;
-               ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
-       }
-       return free_num;
-}
-
 /* Packet receive function */
 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
@@ -1690,7 +1702,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
                           intr_status, mdp->cur_tx, mdp->dirty_tx,
                           (u32)ndev->state, edtrr);
                /* dirty buffer free */
-               sh_eth_txfree(ndev);
+               sh_eth_tx_free(ndev, true);
 
                /* SH7712 BUG */
                if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1751,7 +1763,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                /* Clear Tx interrupts */
                sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
 
-               sh_eth_txfree(ndev);
+               sh_eth_tx_free(ndev, true);
                netif_wake_queue(ndev);
        }
 
@@ -2412,7 +2424,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
-               if (!sh_eth_txfree(ndev)) {
+               if (!sh_eth_tx_free(ndev, true)) {
                        netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
index 50d28261b6b9ea22f42c26be0e9f0e0bed194109..b9cb697b281847a83aa511c577a6c790516f8012 100644 (file)
@@ -1371,6 +1371,13 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
                free_cpumask_var(thread_mask);
        }
 
+       if (count > EFX_MAX_RX_QUEUES) {
+               netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+                              "Reducing number of rx queues from %u to %u.\n",
+                              count, EFX_MAX_RX_QUEUES);
+               count = EFX_MAX_RX_QUEUES;
+       }
+
        /* If RSS is requested for the PF *and* VFs then we can't write RSS
         * table entries that are inaccessible to VFs
         */
index ee14662415c5dfc827a02cb577794af495b0433c..a0c52e3281024b566dfe4b58e3014f26aadb0eaf 100644 (file)
@@ -74,7 +74,10 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_RXQ_MIN_ENT                128U
 #define EFX_TXQ_MIN_ENT(efx)   (2 * efx_tx_max_skb_descs(efx))
 
-#define EFX_TXQ_MAX_ENT(efx)   (EFX_WORKAROUND_35388(efx) ? \
+/* All EF10 architecture NICs steal one bit of the DMAQ size for various
+ * other purposes when counting TxQ entries, so we halve the queue size.
+ */
+#define EFX_TXQ_MAX_ENT(efx)   (EFX_WORKAROUND_EF10(efx) ? \
                                 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
 
 static inline bool efx_rss_enabled(struct efx_nic *efx)
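
The workaround macro above halves the usable TX queue size on EF10-class NICs because one bit of the DMAQ size field is repurposed by the hardware. The selection itself is trivial; as a sketch (plain C, illustrative names):

    #include <stdio.h>

    #define MAX_DMAQ_SIZE 4096

    /* EF10-class NICs lose one bit of the size field, so halve it */
    static int txq_max_ent(int is_ef10)
    {
            return is_ef10 ? MAX_DMAQ_SIZE / 2 : MAX_DMAQ_SIZE;
    }

    int main(void)
    {
            printf("siena max %d, ef10 max %d\n",
                   txq_max_ent(0), txq_max_ent(1));
            return 0;
    }
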
index f5e5cd1659a148fb63ce2078ef13a5ae12d048bc..29614da91cbf919f91841d8e644ab4b246741ec7 100644 (file)
@@ -1354,6 +1354,13 @@ static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
                free_cpumask_var(thread_mask);
        }
 
+       if (count > EF4_MAX_RX_QUEUES) {
+               netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+                              "Reducing number of rx queues from %u to %u.\n",
+                              count, EF4_MAX_RX_QUEUES);
+               count = EF4_MAX_RX_QUEUES;
+       }
+
        return count;
 }
 
index 103f827a16231e058160ea8e283adc51823a1928..c67fa18b8121091de9396b29eea68f9134206ce2 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
 #define EFX_WORKAROUND_10G(efx) 1
 
 /* Bit-bashed I2C reads cause performance drop */
index 9e631952b86f3d4ddd9108e1fe0db7c96d8363d2..48a541eb0af20f9a0db46c0b7a94cdcc709b0b04 100644 (file)
@@ -76,7 +76,7 @@ config TI_CPSW
 config TI_CPTS
        bool "TI Common Platform Time Sync (CPTS) Support"
        depends on TI_CPSW || TI_KEYSTONE_NETCP
-       depends on PTP_1588_CLOCK
+       depends on POSIX_TIMERS
        ---help---
          This driver supports the Common Platform Time Sync unit of
          the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
@@ -87,6 +87,8 @@ config TI_CPTS_MOD
        tristate
        depends on TI_CPTS
        default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+       select NET_PTP_CLASSIFY
+       imply PTP_1588_CLOCK
        default m
 
 config TI_KEYSTONE_NETCP
index a45f98fa4aa70a6ce0c693bef2fda248754a313b..3dadee1080b9e2e541d4b1a335671eb40d8c8205 100644 (file)
@@ -1017,8 +1017,8 @@ tc35815_free_queues(struct net_device *dev)
                        BUG_ON(lp->tx_skbs[i].skb != skb);
 #endif
                        if (skb) {
-                               dev_kfree_skb(skb);
                                pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+                               dev_kfree_skb(skb);
                                lp->tx_skbs[i].skb = NULL;
                                lp->tx_skbs[i].skb_dma = 0;
                        }
index f9f3dba7a58800d9288199b50bec0b85d18cb249..db23cb36ae5cb9ec50493b00329f276ecdd22ae9 100644 (file)
@@ -751,7 +751,6 @@ struct netvsc_device {
        u32 send_section_cnt;
        u32 send_section_size;
        unsigned long *send_section_map;
-       int map_words;
 
        /* Used for NetVSP initialization protocol */
        struct completion channel_init_wait;
index 8dd0b87703288ccc5f067e495f5b297cde00fba1..15ef713d96c0887ec7929ff8d4be3ec3a6cac291 100644 (file)
@@ -236,6 +236,7 @@ static int netvsc_init_buf(struct hv_device *device)
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        struct net_device *ndev;
+       size_t map_words;
        int node;
 
        net_device = get_outbound_net_device(device);
@@ -401,11 +402,9 @@ static int netvsc_init_buf(struct hv_device *device)
                   net_device->send_section_size, net_device->send_section_cnt);
 
        /* Setup state for managing the send buffer. */
-       net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
-                                            BITS_PER_LONG);
+       map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
 
-       net_device->send_section_map = kcalloc(net_device->map_words,
-                                              sizeof(ulong), GFP_KERNEL);
+       net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
        if (net_device->send_section_map == NULL) {
                ret = -ENOMEM;
                goto cleanup;
@@ -683,7 +682,7 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
        unsigned long *map_addr = net_device->send_section_map;
        unsigned int i;
 
-       for_each_clear_bit(i, map_addr, net_device->map_words) {
+       for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
                if (sync_test_and_set_bit(i, map_addr) == 0)
                        return i;
        }
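
netvsc_get_next_send_section() above is a free-slot allocator over a bitmap: find the first clear bit, claim it atomically with sync_test_and_set_bit(), and after this change scan send_section_cnt bits rather than a separately stored word count. A single-threaded sketch that omits the atomicity (plain C; names invented here):

    #include <stdio.h>

    #define SECTIONS 16

    static unsigned long map;   /* bit i set => section i in use */

    static int get_next_send_section(void)
    {
            for (int i = 0; i < SECTIONS; i++) {
                    if (!(map & (1ul << i))) {
                            map |= 1ul << i;        /* claim it */
                            return i;
                    }
            }
            return -1;          /* no free section */
    }

    int main(void)
    {
            printf("%d %d %d\n", get_next_send_section(),
                   get_next_send_section(), get_next_send_section());
            return 0;
    }
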
index ff0a5ed3ca803551a0350303af44976d0f47dcfc..49ce4e9f4a0f387dac1792252ecd5044b6373cd4 100644 (file)
@@ -617,7 +617,8 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
 
 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
                                             unsigned char **iv,
-                                            struct scatterlist **sg)
+                                            struct scatterlist **sg,
+                                            int num_frags)
 {
        size_t size, iv_offset, sg_offset;
        struct aead_request *req;
@@ -629,7 +630,7 @@ static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 
        size = ALIGN(size, __alignof__(struct scatterlist));
        sg_offset = size;
-       size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+       size += sizeof(struct scatterlist) * num_frags;
 
        tmp = kmalloc(size, GFP_ATOMIC);
        if (!tmp)
@@ -649,6 +650,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 {
        int ret;
        struct scatterlist *sg;
+       struct sk_buff *trailer;
        unsigned char *iv;
        struct ethhdr *eth;
        struct macsec_eth_header *hh;
@@ -723,7 +725,14 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                return ERR_PTR(-EINVAL);
        }
 
-       req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
+       ret = skb_cow_data(skb, 0, &trailer);
+       if (unlikely(ret < 0)) {
+               macsec_txsa_put(tx_sa);
+               kfree_skb(skb);
+               return ERR_PTR(ret);
+       }
+
+       req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
        if (!req) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
@@ -732,7 +741,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 
        macsec_fill_iv(iv, secy->sci, pn);
 
-       sg_init_table(sg, MAX_SKB_FRAGS + 1);
+       sg_init_table(sg, ret);
        skb_to_sgvec(skb, sg, 0, skb->len);
 
        if (tx_sc->encrypt) {
@@ -917,6 +926,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 {
        int ret;
        struct scatterlist *sg;
+       struct sk_buff *trailer;
        unsigned char *iv;
        struct aead_request *req;
        struct macsec_eth_header *hdr;
@@ -927,7 +937,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
+       ret = skb_cow_data(skb, 0, &trailer);
+       if (unlikely(ret < 0)) {
+               kfree_skb(skb);
+               return ERR_PTR(ret);
+       }
+       req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
        if (!req) {
                kfree_skb(skb);
                return ERR_PTR(-ENOMEM);
@@ -936,7 +951,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        hdr = (struct macsec_eth_header *)skb->data;
        macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
 
-       sg_init_table(sg, MAX_SKB_FRAGS + 1);
+       sg_init_table(sg, ret);
        skb_to_sgvec(skb, sg, 0, skb->len);
 
        if (hdr->tci_an & MACSEC_TCI_E) {
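
Both macsec paths above now size the scatter/gather table from skb_cow_data()'s return value, which is the number of fragments once the skb has been made writable, instead of assuming a fixed MAX_SKB_FRAGS + 1. A sketch of allocating from the measured count (plain C; sg_entry and alloc_sg are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct sg_entry { void *addr; size_t len; };

    /* size the table from the real fragment count, not an upper bound */
    static struct sg_entry *alloc_sg(int num_frags)
    {
            return calloc(num_frags, sizeof(struct sg_entry));
    }

    int main(void)
    {
            int num_frags = 3;  /* stand-in for skb_cow_data()'s return */
            struct sg_entry *sg = alloc_sg(num_frags);

            if (!sg)
                    return 1;
            printf("allocated %d sg entries\n", num_frags);
            free(sg);
            return 0;
    }
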
index 9261722960a719a8e6d46f4ea5b39f500a0817e8..b34eaaae03fd3f289aab4a90b11c05c587858ad2 100644 (file)
@@ -1139,6 +1139,7 @@ static int macvlan_port_create(struct net_device *dev)
 static void macvlan_port_destroy(struct net_device *dev)
 {
        struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+       struct sk_buff *skb;
 
        dev->priv_flags &= ~IFF_MACVLAN_PORT;
        netdev_rx_handler_unregister(dev);
@@ -1147,7 +1148,15 @@ static void macvlan_port_destroy(struct net_device *dev)
         * but we need to cancel it and purge left skbs if any.
         */
        cancel_work_sync(&port->bc_work);
-       __skb_queue_purge(&port->bc_queue);
+
+       while ((skb = __skb_dequeue(&port->bc_queue))) {
+               const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+               if (src)
+                       dev_put(src->dev);
+
+               kfree_skb(skb);
+       }
 
        kfree(port);
 }
index e2460a57e4b1105ed398e207aa8cdfd84d03707d..ed0d10f54f2607533868dfd10e6bc9d0e09050de 100644 (file)
@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
                skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
                skb_queue_tail(&dp83640->rx_queue, skb);
                schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
-       } else {
-               netif_rx_ni(skb);
        }
 
        return true;
index 6742070ca676f57694a9a6cb11364941deb520a0..da5b392683703b9ece67a24ebcb59aadeba7cc8e 100644 (file)
@@ -297,17 +297,6 @@ static int kszphy_config_init(struct phy_device *phydev)
        if (priv->led_mode >= 0)
                kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
 
-       if (phy_interrupt_is_valid(phydev)) {
-               int ctl = phy_read(phydev, MII_BMCR);
-
-               if (ctl < 0)
-                       return ctl;
-
-               ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
-               if (ret < 0)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -798,9 +787,6 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -940,9 +926,6 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -952,6 +935,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .driver_data    = &ksz9021_type,
+       .probe          = kszphy_probe,
        .config_init    = ksz9021_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -971,6 +955,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .driver_data    = &ksz9021_type,
+       .probe          = kszphy_probe,
        .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = ksz9031_read_status,
@@ -989,9 +974,6 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -1003,9 +985,6 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -1017,9 +996,6 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 } };
index a2bfc82e95d70ba645a1f0975ad835ca7c0991cf..97ff1278167bc455af890e9b6c16d7b9d659ea57 100644 (file)
@@ -591,16 +591,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 EXPORT_SYMBOL(phy_mii_ioctl);
 
 /**
- * phy_start_aneg - start auto-negotiation for this PHY device
+ * phy_start_aneg_priv - start auto-negotiation for this PHY device
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: Sanitizes the settings (if we're not autonegotiating
  *   them), and then calls the driver's config_aneg function.
  *   If the PHYCONTROL Layer is operating, we change the state to
  *   reflect the beginning of Auto-negotiation or forcing.
  */
-int phy_start_aneg(struct phy_device *phydev)
+static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
 {
+       bool trigger = false;
        int err;
 
        if (!phydev->drv)
@@ -628,10 +630,40 @@ int phy_start_aneg(struct phy_device *phydev)
                }
        }
 
+       /* Re-schedule the PHY state machine to check the PHY status,
+        * because negotiation may already be done and the aneg interrupt
+        * may never be generated.
+        */
+       if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+               err = phy_aneg_done(phydev);
+               if (err > 0) {
+                       trigger = true;
+                       err = 0;
+               }
+       }
+
 out_unlock:
        mutex_unlock(&phydev->lock);
+
+       if (trigger)
+               phy_trigger_machine(phydev, sync);
+
        return err;
 }
+
+/**
+ * phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ *   them), and then calls the driver's config_aneg function.
+ *   If the PHYCONTROL Layer is operating, we change the state to
+ *   reflect the beginning of Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+       return phy_start_aneg_priv(phydev, true);
+}
 EXPORT_SYMBOL(phy_start_aneg);
 
 /**
@@ -659,7 +691,7 @@ void phy_start_machine(struct phy_device *phydev)
  *   state machine runs.
  */
 
-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
+void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
        if (sync)
                cancel_delayed_work_sync(&phydev->state_queue);
@@ -1154,7 +1186,7 @@ void phy_state_machine(struct work_struct *work)
        mutex_unlock(&phydev->lock);
 
        if (needs_aneg)
-               err = phy_start_aneg(phydev);
+               err = phy_start_aneg_priv(phydev, false);
        else if (do_suspend)
                phy_suspend(phydev);
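
phy_start_aneg_priv() above decides under phydev->lock whether the state machine needs a manual kick (negotiation already done, so no interrupt will arrive) but performs the kick only after dropping the lock, since phy_trigger_machine() may cancel queued work synchronously. A sketch of that check-under-lock, act-after-unlock shape (plain C with pthreads; names invented here):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool aneg_done = true;   /* pretend it already finished */

    static void trigger_machine(void)
    {
            printf("state machine kicked\n");
    }

    int main(void)
    {
            bool trigger = false;

            pthread_mutex_lock(&lock);
            if (aneg_done)          /* interrupt may never fire now */
                    trigger = true;
            pthread_mutex_unlock(&lock);

            if (trigger)            /* safe: lock no longer held */
                    trigger_machine();
            return 0;
    }
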
 
index 1b52520715aec6f972626361a6aa93accf809301..85c01247f2e3858f33e901de1a37d3eaae0941d7 100644 (file)
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
 #define TEAM_ENC_FEATURES      (NETIF_F_HW_CSUM | NETIF_F_SG | \
                                 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
 
-static void ___team_compute_features(struct team *team)
+static void __team_compute_features(struct team *team)
 {
        struct team_port *port;
        u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team)
                team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
 }
 
-static void __team_compute_features(struct team *team)
-{
-       ___team_compute_features(team);
-       netdev_change_features(team->dev);
-}
-
 static void team_compute_features(struct team *team)
 {
        mutex_lock(&team->lock);
-       ___team_compute_features(team);
+       __team_compute_features(team);
        mutex_unlock(&team->lock);
        netdev_change_features(team->dev);
 }
@@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev)
        team_notify_peers_fini(team);
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
+       netdev_change_features(dev);
 }
 
 static void team_destructor(struct net_device *dev)
@@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
        mutex_lock(&team->lock);
        err = team_port_add(team, port_dev);
        mutex_unlock(&team->lock);
+
+       if (!err)
+               netdev_change_features(dev);
+
        return err;
 }
 
@@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
        mutex_lock(&team->lock);
        err = team_port_del(team, port_dev);
        mutex_unlock(&team->lock);
+
+       if (!err)
+               netdev_change_features(dev);
+
        return err;
 }
 
@@ -2358,8 +2361,10 @@ start_again:
 
        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
                          TEAM_CMD_OPTIONS_GET);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
                goto nla_put_failure;
@@ -2631,8 +2636,10 @@ start_again:
 
        hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
                          TEAM_CMD_PORT_LIST_GET);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
                goto nla_put_failure;
index 3dd490f53e485a399a4531c6681c030b1f5dcde4..f28bd74ac275a039a000cc01b0448d8866f14d1f 100644 (file)
@@ -369,7 +369,7 @@ config USB_NET_NET1080
          optionally with LEDs that indicate traffic
 
 config USB_NET_PLUSB
-       tristate "Prolific PL-2301/2302/25A1 based cables"
+       tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
        # if the handshake/init/reset problems, from original 'plusb',
        # are ever resolved ... then remove "experimental"
        depends on USB_USBNET
index 8a40202c0a1732850da1b2eb64af21470b9c85b4..c4f1c363e24b89404c6834312074f8a4451ded50 100644 (file)
@@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        tx_overhead = 0x40;
 
        len = skb->len;
-       if (skb_headroom(skb) < tx_overhead) {
-               struct sk_buff *skb2;
-
-               skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
+       if (skb_cow_head(skb, tx_overhead)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        __skb_push(skb, tx_overhead);
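
This ch9200 hunk is the first of several identical conversions below (cx82310, kaweth, lan78xx, smsc75xx, smsc95xx, sr9700): the clone-check plus skb_copy_expand() dance becomes a single skb_cow_head() call that guarantees writable headroom, and the skb is freed on failure because returning NULL tells the caller it was already consumed. A control-flow sketch (plain C; cow_head() is a stand-in invented here, not the kernel API):

    #include <stdio.h>

    struct buf { int headroom; int shared; };

    static int cow_head(struct buf *b, int needed)
    {
            if (b->shared || b->headroom < needed) {
                    if (needed > 4096)  /* simulate allocation failure */
                            return -1;
                    b->headroom = needed;   /* "reallocated" writable copy */
                    b->shared = 0;
            }
            return 0;               /* 0 = success, like skb_cow_head() */
    }

    static int tx_fixup(struct buf *b, int overhead)
    {
            if (cow_head(b, overhead)) {
                    /* free here; NULL return means "consumed" upstream */
                    return -1;
            }
            b->headroom -= overhead;    /* headroom is now writable */
            return 0;
    }

    int main(void)
    {
            struct buf b = { .headroom = 0, .shared = 1 };

            printf("fixup %s\n", tx_fixup(&b, 2) ? "failed" : "ok");
            return 0;
    }
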
index e221bfcee76b40a3ad7ba60ec4d348f4b8f4cc73..947bea81d924124c3827e87f75e732e35adb2acd 100644 (file)
@@ -293,12 +293,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 {
        int len = skb->len;
 
-       if (skb_headroom(skb) < 2) {
-               struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+       if (skb_cow_head(skb, 2)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
        skb_push(skb, 2);
 
index 4f2e8141dbe2e53eb23a2b60124e2821b2897fce..00067a0c51ca45b736920a3c56ee42cb24b08271 100644 (file)
@@ -2534,13 +2534,6 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
        SET_NETDEV_DEV(net, &interface->dev);
        SET_NETDEV_DEVTYPE(net, &hso_type);
 
-       /* registering our net device */
-       result = register_netdev(net);
-       if (result) {
-               dev_err(&interface->dev, "Failed to register device\n");
-               goto exit;
-       }
-
        /* start allocating */
        for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
                hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL);
@@ -2560,6 +2553,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 
        add_net_device(hso_dev);
 
+       /* registering our net device */
+       result = register_netdev(net);
+       if (result) {
+               dev_err(&interface->dev, "Failed to register device\n");
+               goto exit;
+       }
+
        hso_log_port(hso_dev);
 
        hso_create_rfkill(hso_dev, interface);
@@ -3279,9 +3279,9 @@ static void __exit hso_exit(void)
        pr_info("unloaded\n");
 
        tty_unregister_driver(tty_drv);
-       put_tty_driver(tty_drv);
        /* deregister the usb driver */
        usb_deregister(&hso_driver);
+       put_tty_driver(tty_drv);
 }
 
 /* Module definitions */
index 876f02f4945eafdc2fb5cfa0f9dcb54d9b498af4..2a2c3edb6bad0b3bd257c3a101d100ad3b00cc59 100644 (file)
@@ -803,18 +803,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
        }
 
        /* We now decide whether we can put our special header into the sk_buff */
-       if (skb_cloned(skb) || skb_headroom(skb) < 2) {
-               /* no such luck - we make our own */
-               struct sk_buff *copied_skb;
-               copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
-               dev_kfree_skb_irq(skb);
-               skb = copied_skb;
-               if (!copied_skb) {
-                       kaweth->stats.tx_errors++;
-                       netif_start_queue(net);
-                       spin_unlock_irq(&kaweth->device_lock);
-                       return NETDEV_TX_OK;
-               }
+       if (skb_cow_head(skb, 2)) {
+               kaweth->stats.tx_errors++;
+               netif_start_queue(net);
+               spin_unlock_irq(&kaweth->device_lock);
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
        }
 
        private_header = (__le16 *)__skb_push(skb, 2);
index 9889a70ff4f6fece5bfabbfb45a3470f721a5a32..636f48f19d1eacae67c050de4fc3e651bffdf825 100644 (file)
@@ -2607,14 +2607,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
 {
        u32 tx_cmd_a, tx_cmd_b;
 
-       if (skb_headroom(skb) < TX_OVERHEAD) {
-               struct sk_buff *skb2;
-
-               skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+       if (skb_cow_head(skb, TX_OVERHEAD)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        if (lan78xx_linearize(skb) < 0)
index 22e1a9a99a7d8cad77b22410973575ee17699b2b..6fe59373cba9b8bd1afce514265171dbbd43aa9e 100644 (file)
@@ -102,7 +102,7 @@ static int pl_reset(struct usbnet *dev)
 }
 
 static const struct driver_info        prolific_info = {
-       .description =  "Prolific PL-2301/PL-2302/PL-25A1",
+       .description =  "Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
        .flags =        FLAG_POINTTOPOINT | FLAG_NO_SETINT,
                /* some PL-2302 versions seem to fail usb_set_interface() */
        .reset =        pl_reset,
@@ -139,6 +139,17 @@ static const struct usb_device_id  products [] = {
                                         * Host-to-Host Cable
                                         */
        .driver_info =  (unsigned long) &prolific_info,
+
+},
+
+/* super speed cables */
+{
+       USB_DEVICE(0x067b, 0x27a1),     /* PL-27A1, no eeprom
+                                        * also: goobay Active USB 3.0
+                                        * Data Link,
+                                        * Unitek Y-3501
+                                        */
+       .driver_info =  (unsigned long) &prolific_info,
 },
 
        { },            // END
@@ -158,5 +169,5 @@ static struct usb_driver plusb_driver = {
 module_usb_driver(plusb_driver);
 
 MODULE_AUTHOR("David Brownell");
-MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
+MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
 MODULE_LICENSE("GPL");
index 156f7f85e4860d682d679df68bfe8cfe2a3d4b3b..2474618404f5e592c0fe56d38c30c8988e1ed8ef 100644 (file)
@@ -908,7 +908,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
-       {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
        {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},    /* XS Stick W100-2 from 4G Systems */
        {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},    /* Olivetti Olicard 100 */
        {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},    /* Olivetti Olicard 120 */
index 0b17b40d7a4fa2653caf21406c4a6b3b45d868b0..190de9a90f7387c5070c7f589aa18bb7d05ac5d7 100644 (file)
@@ -2203,13 +2203,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
 {
        u32 tx_cmd_a, tx_cmd_b;
 
-       if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
-               struct sk_buff *skb2 =
-                       skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+       if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
index 831aa33d078ae7d2dd57fdded5de71d1eb915f99..5f19fb0f025d9449d0ba20958610e0d1f083f032 100644 (file)
@@ -2001,13 +2001,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
        /* We do not advertise SG, so skbs should be already linearized */
        BUG_ON(skb_shinfo(skb)->nr_frags);
 
-       if (skb_headroom(skb) < overhead) {
-               struct sk_buff *skb2 = skb_copy_expand(skb,
-                       overhead, 0, flags);
+       /* Make writable and expand header space by overhead if required */
+       if (skb_cow_head(skb, overhead)) {
+               /* Must deallocate here as returning NULL to indicate error
+                * means the skb won't be deallocated in the caller.
+                */
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        if (csum) {
index 4a1e9c489f1f455388ffee289d65e1d6b36cba42..aadfe1d1c37ee67e2a7d17c759db12dad248c41d 100644 (file)
@@ -456,14 +456,9 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 
        len = skb->len;
 
-       if (skb_headroom(skb) < SR_TX_OVERHEAD) {
-               struct sk_buff *skb2;
-
-               skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
+       if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        __skb_push(skb, SR_TX_OVERHEAD);
index 3de65ea6531a8add927c0a2d7c74e8923c0f3274..453244805c52e570394673d7a3ebd71cc62fd5ca 100644 (file)
@@ -1929,7 +1929,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
                   " value=0x%04x index=0x%04x size=%d\n",
                   cmd, reqtype, value, index, size);
 
-       if (data) {
+       if (size) {
                buf = kmalloc(size, GFP_KERNEL);
                if (!buf)
                        goto out;
@@ -1938,8 +1938,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
        err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
                              cmd, reqtype, value, index, buf, size,
                              USB_CTRL_GET_TIMEOUT);
-       if (err > 0 && err <= size)
-               memcpy(data, buf, err);
+       if (err > 0 && err <= size) {
+               if (data)
+                       memcpy(data, buf, err);
+               else
+                       netdev_dbg(dev->net,
+                               "Huh? Data requested but thrown away.\n");
+       }
        kfree(buf);
 out:
        return err;
@@ -1960,7 +1965,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
                buf = kmemdup(data, size, GFP_KERNEL);
                if (!buf)
                        goto out;
-       }
+       } else {
+               if (size) {
+                       WARN_ON_ONCE(1);
+                       err = -EINVAL;
+                       goto out;
+               }
+       }
 
        err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
                              cmd, reqtype, value, index, buf, size,
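The two usbnet hunks tighten the data/size contract of the control helpers: the read path now allocates and performs the transfer whenever size is non-zero (a NULL data pointer just means the result is read and discarded, with a debug note), while the write path rejects a non-zero size with no data instead of handing a NULL buffer to usb_control_msg(). The invariant, reduced to a sketch with an illustrative helper name:

/* Sketch: validate caller arguments before touching the wire.
 * Reads may discard data (data == NULL is legal); writes must
 * supply a buffer for any non-zero length.
 */
static int demo_check_ctrl_args(bool is_write, const void *data, u16 size)
{
        if (is_write && size && !data) {
                WARN_ON_ONCE(1);        /* driver bug, not a device error */
                return -EINVAL;
        }
        return 0;
}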
index ea9890d619670e1abfba75fe608c2925d824cb1c..f36584616e7d6825c7e69137b4a31a3d55779688 100644 (file)
@@ -2230,14 +2230,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
 #define MIN_MTU ETH_MIN_MTU
 #define MAX_MTU ETH_MAX_MTU
 
-static int virtnet_probe(struct virtio_device *vdev)
+static int virtnet_validate(struct virtio_device *vdev)
 {
-       int i, err;
-       struct net_device *dev;
-       struct virtnet_info *vi;
-       u16 max_queue_pairs;
-       int mtu;
-
        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
@@ -2247,6 +2241,25 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (!virtnet_validate_features(vdev))
                return -EINVAL;
 
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
+               int mtu = virtio_cread16(vdev,
+                                        offsetof(struct virtio_net_config,
+                                                 mtu));
+               if (mtu < MIN_MTU)
+                       __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
+       }
+
+       return 0;
+}
+
+static int virtnet_probe(struct virtio_device *vdev)
+{
+       int i, err;
+       struct net_device *dev;
+       struct virtnet_info *vi;
+       u16 max_queue_pairs;
+       int mtu;
+
        /* Find if host supports multiqueue virtio_net device */
        err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
                                   struct virtio_net_config,
@@ -2362,11 +2375,20 @@ static int virtnet_probe(struct virtio_device *vdev)
                                     offsetof(struct virtio_net_config,
                                              mtu));
                if (mtu < dev->min_mtu) {
-                       __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
-               } else {
-                       dev->mtu = mtu;
-                       dev->max_mtu = mtu;
+                       /* Should never trigger: MTU was previously validated
+                        * in virtnet_validate.
+                        */
+                       dev_err(&vdev->dev, "device MTU appears to have changed, "
+                               "it is now %d < %d", mtu, dev->min_mtu);
+                       goto free_stats;
                }
+
+               dev->mtu = mtu;
+               dev->max_mtu = mtu;
+
+               /* TODO: size buffers correctly in this case. */
+               if (dev->mtu > ETH_DATA_LEN)
+                       vi->big_packets = true;
        }
 
        if (vi->any_header_sg)
@@ -2544,6 +2566,7 @@ static struct virtio_driver virtio_net_driver = {
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
+       .validate =     virtnet_validate,
        .probe =        virtnet_probe,
        .remove =       virtnet_remove,
        .config_changed = virtnet_config_changed,
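The virtio core invokes the new .validate callback after feature negotiation but before .probe, which is the only safe point to clear a feature bit (here VIRTIO_NET_F_MTU when the advertised MTU is below MIN_MTU) so that probe can trust whatever features remain. A skeleton of where the hook plugs in; demo_validate, demo_probe and demo_driver are placeholder names:

static int demo_validate(struct virtio_device *vdev)
{
        /* withdraw features the device advertises inconsistently */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU) &&
            virtio_cread16(vdev, offsetof(struct virtio_net_config,
                                          mtu)) < MIN_MTU)
                __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);

        return 0;       /* a negative return aborts device setup */
}

static struct virtio_driver demo_driver = {
        .validate = demo_validate,      /* runs before .probe */
        .probe    = demo_probe,
        /* .id_table, .remove, ... */
};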
index d6988db1930d6b38db5f932a81c6c64b9c76776a..7d909c8183e95a62b6f8a3182d3ce645a264e909 100644 (file)
@@ -1128,7 +1128,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
                goto nla_put_failure;
 
        /* rule only needs to appear once */
-       nlh->nlmsg_flags &= NLM_F_EXCL;
+       nlh->nlmsg_flags |= NLM_F_EXCL;
 
        frh = nlmsg_data(nlh);
        memset(frh, 0, sizeof(*frh));
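The single-character vrf fix is worth spelling out: nlmsg_flags is a bitmask, so "flags &= NLM_F_EXCL" clears every other flag (and leaves NLM_F_EXCL set only if it already was), whereas "flags |= NLM_F_EXCL" adds the exclusive flag on top of the existing ones. A runnable illustration using the uapi values (NLM_F_REQUEST is 0x01, NLM_F_EXCL is 0x200):

#include <stdio.h>

#define NLM_F_REQUEST 0x01
#define NLM_F_EXCL    0x200

int main(void)
{
        unsigned short flags = NLM_F_REQUEST;

        /* buggy: 0x01 & 0x200 == 0, the request flag is wiped out */
        printf("&= yields 0x%x\n", flags & NLM_F_EXCL);
        /* fixed: 0x01 | 0x200 == 0x201, both flags survive */
        printf("|= yields 0x%x\n", flags | NLM_F_EXCL);
        return 0;
}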
index 23d4a1728cdfa4993903b6b4504c6a70f5a7fff4..351bac8f65031edf831741159c01e860c89bb4a5 100644 (file)
@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
        rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
        if (rc < 0)
                goto out_unlock;
+       nvdimm_bus_unlock(&nvdimm_bus->dev);
+
        if (copy_to_user(p, buf, buf_len))
                rc = -EFAULT;
+
+       vfree(buf);
+       return rc;
+
  out_unlock:
        nvdimm_bus_unlock(&nvdimm_bus->dev);
  out:
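The __nd_ioctl change is a classic unlock-before-copy: copy_to_user() may fault and sleep, so the result is copied out only after nvdimm_bus_unlock(), at the cost of a dedicated early-return path that frees the buffer itself. The general shape of the pattern, sketched with generic lock, buffer and helper names:

static long demo_ioctl(void __user *uptr, size_t len)
{
        void *buf = vmalloc(len);
        long rc;

        if (!buf)
                return -ENOMEM;

        mutex_lock(&demo_lock);
        rc = demo_do_command(buf, len); /* the work that needs the lock */
        mutex_unlock(&demo_lock);       /* drop it before faulting user pages */

        if (rc >= 0 && copy_to_user(uptr, buf, len))
                rc = -EFAULT;
        vfree(buf);
        return rc;
}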
index b3323c0697f6239ebbfe757137cde8352fe3c480..ca6d572c48fcb62d7c8a83e4874f18b625caaeb8 100644 (file)
@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
        }
 
        if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
-               if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+               /*
+                * FIXME: nsio_rw_bytes() may be called from atomic
+                * context in the btt case and nvdimm_clear_poison()
+                * takes a sleeping lock. Until the locking can be
+                * reworked this capability requires that the namespace
+                * is not claimed by btt.
+                */
+               if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
+                               && (!ndns->claim || !is_nd_btt(ndns->claim))) {
                        long cleared;
 
                        cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
index 0eedc49e0d473ed36b5ef9832760aa8498b9f146..8b721321be5b1cb291e780ae9a5ed7ea5ad67e09 100644 (file)
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
 
 int alias_dpa_busy(struct device *dev, void *data)
 {
-       resource_size_t map_end, blk_start, new, busy;
+       resource_size_t map_end, blk_start, new;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
  retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
-        * the end of the interleave-set mapping that is not already
-        * covered by a blk allocation.
+        * the end of the interleave-set mapping.
         */
-       busy = 0;
        for_each_dpa_resource(ndd, res) {
+               if (strncmp(res->name, "pmem", 4) != 0)
+                       continue;
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
-                       if (strncmp(res->name, "pmem", 4) == 0) {
-                               new = max(blk_start, min(map_end + 1,
-                                                       res->end + 1));
-                               if (new != blk_start) {
-                                       blk_start = new;
-                                       goto retry;
-                               }
-                       } else
-                               busy += min(map_end, res->end)
-                                       - max(nd_mapping->start, res->start) + 1;
-               } else if (nd_mapping->start > res->start
-                               && map_end < res->end) {
-                       /* total eclipse of the PMEM region mapping */
-                       busy += nd_mapping->size;
-                       break;
+                       new = max(blk_start, min(map_end + 1, res->end + 1));
+                       if (new != blk_start) {
+                               blk_start = new;
+                               goto retry;
+                       }
                }
        }
 
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
                return 1;
        }
 
-       info->available -= blk_start - nd_mapping->start + busy;
+       info->available -= blk_start - nd_mapping->start;
 
        return 0;
 }
 
-static int blk_dpa_busy(struct device *dev, void *data)
-{
-       struct blk_alloc_info *info = data;
-       struct nd_mapping *nd_mapping;
-       struct nd_region *nd_region;
-       resource_size_t map_end;
-       int i;
-
-       if (!is_nd_pmem(dev))
-               return 0;
-
-       nd_region = to_nd_region(dev);
-       for (i = 0; i < nd_region->ndr_mappings; i++) {
-               nd_mapping  = &nd_region->mapping[i];
-               if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
-                       break;
-       }
-
-       if (i >= nd_region->ndr_mappings)
-               return 0;
-
-       map_end = nd_mapping->start + nd_mapping->size - 1;
-       if (info->res->start >= nd_mapping->start
-                       && info->res->start < map_end) {
-               if (info->res->end <= map_end) {
-                       info->busy = 0;
-                       return 1;
-               } else {
-                       info->busy -= info->res->end - map_end;
-                       return 0;
-               }
-       } else if (info->res->end >= nd_mapping->start
-                       && info->res->end <= map_end) {
-               info->busy -= nd_mapping->start - info->res->start;
-               return 0;
-       } else {
-               info->busy -= nd_mapping->size;
-               return 0;
-       }
-}
-
 /**
  * nd_blk_available_dpa - account the unused dpa of BLK region
  * @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
-
-               info.res = res;
-               info.busy = resource_size(res);
-               device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
-               info.available -= info.busy;
+               info.available -= resource_size(res);
        }
 
        return info.available;
index 9583a5f58a1ddc498b89d1b6cbaa14cf1a60bea2..eeb409c287b8ed304bdafea7804619dfb39d15b5 100644 (file)
@@ -1315,6 +1315,14 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                        if (target)
                                table->entries[state] = target;
 
+                       /*
+                        * Don't allow transitions to the deepest state
+                        * if it's quirked off.
+                        */
+                       if (state == ctrl->npss &&
+                           (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
+                               continue;
+
                        /*
                         * Is this state a useful non-operational state for
                         * higher-power states to autonomously transition to?
@@ -1387,16 +1395,15 @@ struct nvme_core_quirk_entry {
 };
 
 static const struct nvme_core_quirk_entry core_quirks[] = {
-       /*
-        * Seen on a Samsung "SM951 NVMe SAMSUNG 256GB": using APST causes
-        * the controller to go out to lunch.  It dies when the watchdog
-        * timer reads CSTS and gets 0xffffffff.
-        */
        {
-               .vid = 0x144d,
-               .fr = "BXW75D0Q",
+               /*
+                * This Toshiba device seems to die using any APST states.  See:
+                * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
+                */
+               .vid = 0x1179,
+               .mn = "THNSF5256GPUK TOSHIBA",
                .quirks = NVME_QUIRK_NO_APST,
-       },
+       }
 };
 
 /* match is null-terminated but idstr is space-padded. */
index 9690beb15e69ab47bb04345da5f142ec56141035..d996ca73d3be37c2332352fdc45767e738d72822 100644 (file)
@@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
        }
 
        ctrl->ctrl.sqsize =
-               min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+               min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (error)
index 2aa20e3e5675bf14a8aaf8784bafc344b970a052..ab2d6ec7eb5cc32292a3a9aec3a536fb21f29c4c 100644 (file)
@@ -83,6 +83,11 @@ enum nvme_quirks {
         * APST should not be used.
         */
        NVME_QUIRK_NO_APST                      = (1 << 4),
+
+       /*
+        * The deepest sleep state should not be used.
+        */
+       NVME_QUIRK_NO_DEEPEST_PS                = (1 << 5),
 };
 
 /*
index 26a5fd05fe88aa003a00dc4ece6e9900bd95e618..5d309535abbd6fbef366cc61b254dceac2719618 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/blk-mq-pci.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
@@ -1943,10 +1944,31 @@ static int nvme_dev_map(struct nvme_dev *dev)
        return -ENODEV;
 }
 
+static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+{
+       if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
+               /*
+                * Several Samsung devices seem to drop off the PCIe bus
+                * randomly when APST is on and uses the deepest sleep state.
+                * This has been observed on a Samsung "SM951 NVMe SAMSUNG
+                * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
+                * 950 PRO 256GB", but it seems to be restricted to two Dell
+                * laptops.
+                */
+               if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
+                   (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
+                    dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
+                       return NVME_QUIRK_NO_DEEPEST_PS;
+       }
+
+       return 0;
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int node, result = -ENOMEM;
        struct nvme_dev *dev;
+       unsigned long quirks = id->driver_data;
 
        node = dev_to_node(&pdev->dev);
        if (node == NUMA_NO_NODE)
@@ -1978,8 +2000,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto put_pci;
 
+       quirks |= check_dell_samsung_bug(pdev);
+
        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-                       id->driver_data);
+                       quirks);
        if (result)
                goto release_pools;
 
index 47a479f26e5d7de3605c0263d1a0cdbba1b7e1c1..16f84eb0b95e8608b73b196763f8bbd886087b22 100644 (file)
@@ -1606,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
        }
 
        ctrl->ctrl.sqsize =
-               min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+               min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (error)
index 22f7bc6bac7fa77dd48198cde3a31ef60ead531b..c7b0b6a527083f7d865e752c7e953b2e1411b808 100644 (file)
@@ -392,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        }
 
        ctrl->ctrl.sqsize =
-               min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+               min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (error)
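The same one-line fix lands in the FC, RDMA and loop transports because all three copied the same off-by-one: NVME_CAP_MQES() and ctrl->sqsize are both zero-based (a stored value of n means n + 1 entries), so clamping against MQES + 1 mixed a one-based count into a zero-based field and could exceed the advertised queue depth by one entry. Worked numbers, assuming a device with MQES = 31 (32 entries) and a host default of 128 entries:

#include <stdio.h>

int main(void)
{
        int mqes = 31;   /* device: 32 queue entries, zero-based */
        int want = 127;  /* host:  128 queue entries, zero-based */

        /* buggy clamp: min(32, 127) = 32 -> 33 entries, one too many */
        int buggy = (mqes + 1) < want ? (mqes + 1) : want;
        /* fixed clamp: min(31, 127) = 31 -> exactly 32 entries */
        int fixed = mqes < want ? mqes : want;

        printf("buggy: sqsize=%d (%d entries)\n", buggy, buggy + 1);
        printf("fixed: sqsize=%d (%d entries)\n", fixed, fixed + 1);
        return 0;
}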
index fd66a3199db77d41d08e0658d89611370d46446f..cf9d6a9d9fd4fc17afd19d0e659f5e7cff0a3ca8 100644 (file)
@@ -380,9 +380,13 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
 
 static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
        {
-               .compatible = "hisilicon,pcie-almost-ecam",
+               .compatible =  "hisilicon,hip06-pcie-ecam",
                .data       = (void *) &hisi_pcie_platform_ops,
        },
+       {
+               .compatible =  "hisilicon,hip07-pcie-ecam",
+               .data       = (void *) &hisi_pcie_platform_ops,
+       },
        {},
 };
 
index f80134e3e0b68aba9f8b94771702cdd3df83a678..9ff790174906e46962ec9b9fa00f4f04de14aa18 100644 (file)
@@ -13,6 +13,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/dmi.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
+/*
+ * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
+ * tables. Since we leave GPIOs that are not capable of generating
+ * interrupts out of the irqdomain, the numbering will be different and
+ * will cause devices using the hardcoded IRQ numbers to fail. In order
+ * not to break such machines, we only mask pins from the irqdomain if
+ * the machine is not listed below.
+ */
+static const struct dmi_system_id chv_no_valid_mask[] = {
+       {
+               /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
+               .ident = "Acer Chromebook (CYAN)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+                       DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+               },
+       },
+       { }
+};
+
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 {
        const struct chv_gpio_pinrange *range;
        struct gpio_chip *chip = &pctrl->chip;
+       bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
        int ret, i, offset;
 
        *chip = chv_gpio_chip;
@@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        chip->label = dev_name(pctrl->dev);
        chip->parent = pctrl->dev;
        chip->base = -1;
-       chip->irq_need_valid_mask = true;
+       chip->irq_need_valid_mask = need_valid_mask;
 
        ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
        if (ret) {
@@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                intsel &= CHV_PADCTRL0_INTSEL_MASK;
                intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
 
-               if (intsel >= pctrl->community->nirqs)
+               if (need_valid_mask && intsel >= pctrl->community->nirqs)
                        clear_bit(i, chip->irq_valid_mask);
        }
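dmi_check_system() returns the number of entries that match the running machine, so listed systems get need_valid_mask = false and keep the firmware-visible IRQ numbering intact; note the empty { } terminator, which dmi_check_system() relies on to find the end of the table. The idiom stripped to its core, with a hypothetical helper name:

static const struct dmi_system_id demo_quirk_list[] = {
        {
                .ident = "Acer Chromebook (CYAN)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
                },
        },
        { }     /* terminator, required by dmi_check_system() */
};

static bool demo_machine_is_quirked(void)
{
        /* non-zero iff the running machine matches an entry */
        return dmi_check_system(demo_quirk_list) > 0;
}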
 
index f9b49967f512b52cec5447ae0baee9e146745d1b..63e51b56a22a94c528489a8d18aaf38795f0072e 100644 (file)
@@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
 
 /* pin banks of exynos5433 pin-controller - ALIVE */
 static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
-       EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
-       EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
-       EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
-       EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
 };
 
 /* pin banks of exynos5433 pin-controller - AUD */
 static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
-       EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
 };
 
 /* pin banks of exynos5433 pin-controller - CPIF */
 static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - eSE */
 static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - FINGER */
 static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - FSYS */
 static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
-       EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
-       EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
-       EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
-       EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
-       EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
 };
 
 /* pin banks of exynos5433 pin-controller - IMEM */
 static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - NFC */
 static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - PERIC */
 static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
-       EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
-       EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
-       EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
-       EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
-       EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
-       EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
-       EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
-       EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
-       EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
-       EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
-       EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
-       EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
-       EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
-       EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
-       EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
-       EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
 };
 
 /* pin banks of exynos5433 pin-controller - TOUCH */
 static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
 };
 
 /*
index a473092fb8d2362f11a1a48beb7f545b7c9f25ce..cd046eb7d705682fae1293abf432b9d04465328f 100644 (file)
                .name           = id                    \
        }
 
-#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
-       {                                               \
-               .type           = &bank_type_alive,     \
-               .pctl_offset    = reg,                  \
-               .nr_pins        = pins,                 \
-               .eint_type      = EINT_TYPE_WKUP,       \
-               .eint_offset    = offs,                 \
-               .name           = id,                   \
-               .pctl_res_idx   = pctl_idx,             \
-       }                                               \
-
 #define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs)         \
        {                                                       \
                .type           = &exynos5433_bank_type_off,    \
index 053088b9b66edb4c50c3b47c5e942b6e2e343c00..c1527cb645be8d0c038742559e270d1914ac8726 100644 (file)
@@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
        .clk_rate = 19200000,
        .npwm = 4,
        .base_unit_bits = 22,
+       .bypass = true,
+};
+
+/* Tangier */
+static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
+       .clk_rate = 19200000,
+       .npwm = 4,
+       .base_unit_bits = 22,
 };
 
 static int pwm_lpss_probe_pci(struct pci_dev *pdev,
@@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
        { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
        { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
-       { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info},
+       { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
        { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
        { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
        { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
index b22b6fdadb9ae14e0e55f28ecbfcece7e971eb1f..5d6ed1507d29284f2ba28f2cc781f4b797067f01 100644 (file)
@@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
        .clk_rate = 19200000,
        .npwm = 4,
        .base_unit_bits = 22,
+       .bypass = true,
 };
 
 static int pwm_lpss_probe_platform(struct platform_device *pdev)
index 689d2c1cbead80f5b6540dac13f1f0e56edfecfa..8db0d40ccacde84a61d292936f6bbdeeed7ac358 100644 (file)
@@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
        writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
 }
 
-static int pwm_lpss_update(struct pwm_device *pwm)
+static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
 {
        struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
        const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
@@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm)
        u32 val;
        int err;
 
-       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
-
        /*
         * PWM Configuration register has SW_UPDATE bit that is set when a new
         * configuration is written to the register. The bit is automatically
@@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
        pwm_lpss_write(pwm, ctrl);
 }
 
+static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
+{
+       if (cond)
+               pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
+}
+
 static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                          struct pwm_state *state)
 {
@@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                                return ret;
                        }
                        pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       ret = pwm_lpss_update(pwm);
+                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
+                       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
+                       ret = pwm_lpss_wait_for_update(pwm);
                        if (ret) {
                                pm_runtime_put(chip->dev);
                                return ret;
                        }
-                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
+                       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
                } else {
                        ret = pwm_lpss_is_updating(pwm);
                        if (ret)
                                return ret;
                        pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       return pwm_lpss_update(pwm);
+                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
+                       return pwm_lpss_wait_for_update(pwm);
                }
        } else if (pwm_is_enabled(pwm)) {
                pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
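Splitting pwm_lpss_update() into an explicit SW_UPDATE write plus pwm_lpss_wait_for_update(), bracketed by pwm_lpss_cond_enable(), lets one apply path express both orderings: on parts without the new boardinfo bypass flag, PWM_ENABLE is set before waiting for SW_UPDATE to clear, while on bypass parts (Broxton in this series) it is set only after the update has latched. The enable ordering on its own, as a sketch built from the driver's helpers:

/* Sketch of the two orderings selected by the boardinfo bypass flag:
 *   bypass == false: SW_UPDATE, set ENABLE, then wait for the update
 *   bypass == true:  SW_UPDATE, wait for the update, then set ENABLE
 */
static int demo_apply_enabled(struct pwm_device *pwm, bool bypass)
{
        int ret;

        pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
        pwm_lpss_cond_enable(pwm, !bypass);     /* legacy parts first */
        ret = pwm_lpss_wait_for_update(pwm);
        if (ret)
                return ret;
        pwm_lpss_cond_enable(pwm, bypass);      /* bypass parts last */
        return 0;
}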
index c94cd7c2695da72181830f35d52c2ce12c6afc18..98306bb02cfe71c0775eb430e7cf623fdc431889 100644 (file)
@@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo {
        unsigned long clk_rate;
        unsigned int npwm;
        unsigned long base_unit_bits;
+       bool bypass;
 };
 
 struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
index ef89df1f7336c77a70c3a4de42b6d71ec96162ba..744d56197286a45eb148de437d31a3c3ca8fbd1a 100644 (file)
@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        return 0;
 }
 
+static int rockchip_pwm_enable(struct pwm_chip *chip,
+                        struct pwm_device *pwm,
+                        bool enable,
+                        enum pwm_polarity polarity)
+{
+       struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+       int ret;
+
+       if (enable) {
+               ret = clk_enable(pc->clk);
+               if (ret)
+                       return ret;
+       }
+
+       pc->data->set_enable(chip, pwm, enable, polarity);
+
+       if (!enable)
+               clk_disable(pc->clk);
+
+       return 0;
+}
+
 static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                              struct pwm_state *state)
 {
@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return ret;
 
        if (state->polarity != curstate.polarity && enabled) {
-               pc->data->set_enable(chip, pwm, false, state->polarity);
+               ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
+               if (ret)
+                       goto out;
                enabled = false;
        }
 
        ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (ret) {
                if (enabled != curstate.enabled)
-                       pc->data->set_enable(chip, pwm, !enabled,
-                                            state->polarity);
-
+                       rockchip_pwm_enable(chip, pwm, !enabled,
+                                     state->polarity);
                goto out;
        }
 
-       if (state->enabled != enabled)
-               pc->data->set_enable(chip, pwm, state->enabled,
-                                    state->polarity);
+       if (state->enabled != enabled) {
+               ret = rockchip_pwm_enable(chip, pwm, state->enabled,
+                                   state->polarity);
+               if (ret)
+                       goto out;
+       }
 
        /*
         * Update the state with the real hardware, which can differ a bit
index f1e5e65388bb525b186f9257794afcf3564d2c3f..cd739d2fa160387c91b08e1b4d4f470394599c57 100644 (file)
@@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc)
 }
 EXPORT_SYMBOL_GPL(reset_control_status);
 
-static struct reset_control *__reset_control_get(
+static struct reset_control *__reset_control_get_internal(
                                struct reset_controller_dev *rcdev,
                                unsigned int index, bool shared)
 {
@@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get(
        return rstc;
 }
 
-static void __reset_control_put(struct reset_control *rstc)
+static void __reset_control_put_internal(struct reset_control *rstc)
 {
        lockdep_assert_held(&reset_list_mutex);
 
@@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
        }
 
        /* reset_list_mutex also protects the rcdev's reset_control list */
-       rstc = __reset_control_get(rcdev, rstc_id, shared);
+       rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
 
        mutex_unlock(&reset_list_mutex);
 
@@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
 }
 EXPORT_SYMBOL_GPL(__of_reset_control_get);
 
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+                                         int index, bool shared, bool optional)
+{
+       if (dev->of_node)
+               return __of_reset_control_get(dev->of_node, id, index, shared,
+                                             optional);
+
+       return optional ? NULL : ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(__reset_control_get);
+
 /**
  * reset_control_put - free the reset controller
  * @rstc: reset controller
@@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc)
                return;
 
        mutex_lock(&reset_list_mutex);
-       __reset_control_put(rstc);
+       __reset_control_put_internal(rstc);
        mutex_unlock(&reset_list_mutex);
 }
 EXPORT_SYMBOL_GPL(reset_control_put);
@@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
        if (!ptr)
                return ERR_PTR(-ENOMEM);
 
-       rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
-                                     id, index, shared, optional);
+       rstc = __reset_control_get(dev, id, index, shared, optional);
        if (!IS_ERR(rstc)) {
                *ptr = rstc;
                devres_add(dev, ptr);
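The new __reset_control_get() gives non-DT devices a defined outcome: NULL for an optional request (the line is simply absent) and -EINVAL otherwise, and the devm path now funnels through it instead of dereferencing dev->of_node by hand. A consumer-side sketch of the optional semantics, assuming the optional variants of this series (which return NULL when the line is missing) and a hypothetical "demo" consumer:

static int demo_probe_resets(struct device *dev)
{
        /* optional lookup: a missing line yields NULL, not an error */
        struct reset_control *rst =
                devm_reset_control_get_optional(dev, "demo");

        if (IS_ERR(rst))
                return PTR_ERR(rst);    /* a real lookup failure */

        if (rst) {                      /* line present: pulse it */
                reset_control_assert(rst);
                reset_control_deassert(rst);
        }
        return 0;
}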
index d036a806f31c47917e2a35cac4a2666bfffdb3eb..d281492009fb4457131b5ef87b04b58d8f3d94c3 100644 (file)
@@ -1690,9 +1690,6 @@ struct aac_dev
 #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
        (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
 
-#define aac_adapter_check_health(dev) \
-       (dev)->a_ops.adapter_check_health(dev)
-
 #define aac_adapter_restart(dev, bled, reset_type) \
        ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
 
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
        return capacity;
 }
 
+static inline int aac_adapter_check_health(struct aac_dev *dev)
+{
+       if (unlikely(pci_channel_offline(dev->pdev)))
+               return -1;
+
+       return (dev)->a_ops.adapter_check_health(dev);
+}
+
 /* SCp.phase values */
 #define AAC_OWNER_MIDLEVEL     0x101
 #define AAC_OWNER_LOWLEVEL     0x102
index c8172f16cf33cd6454ae571c3310f6451afa8b79..1f4918355fdb00a9abab06da1e154b710fb86b0e 100644 (file)
@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
        spin_unlock_irqrestore(&aac->fib_lock, flagv);
 
        if (BlinkLED < 0) {
-               printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+               printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
+                               aac->name, BlinkLED);
                goto out;
        }
 
index b29afafc28857e95bffd8946598748907ab77b17..5d5e272fd815a3ed076eb52e2df47d3ff765fd3a 100644 (file)
@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
                break;
        case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
        case IPR_IOASA_IR_DUAL_IOA_DISABLED:
-               scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+               /*
+                * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
+                * so SCSI mid-layer and upper layers handle it accordingly.
+                */
+               if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
+                       scsi_cmd->result |= (DID_PASSTHROUGH << 16);
                break;
        case IPR_IOASC_BUS_WAS_RESET:
        case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
index ed58b9104f58b8894b3dfd924c6facfaba29be67..e10b91cc3c62388ccee0db756f1371d7c8037ff9 100644 (file)
@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
                qedf_set_vlan_id(qedf, vid);
 
                /* Inform waiter that it's ok to call fcoe_ctlr_link up() */
-               complete(&qedf->fipvlan_compl);
+               if (!completion_done(&qedf->fipvlan_compl))
+                       complete(&qedf->fipvlan_compl);
        }
 }
 
index 8e2a160490e66a747e75bf9c0c1a149e34d474a3..cceddd995a4bf46605ae94143cfcff9df693fb83 100644 (file)
@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
                atomic_set(&qedf->num_offloads, 0);
                qedf->stop_io_on_error = false;
                pci_set_drvdata(pdev, qedf);
+               init_completion(&qedf->fipvlan_compl);
 
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
                   "QLogic FastLinQ FCoE Module qedf %s, "
index 3e7011757c8267022744e19778f49cc4db286822..83d61d2142e98d9c48ad3a6dcc5acf4183e293ff 100644 (file)
@@ -1160,8 +1160,13 @@ static inline
 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
 {
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
 
-       return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
+       if (IS_P3P_TYPE(ha))
+               return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+       else
+               return ((RD_REG_DWORD(&reg->host_status)) ==
+                       ISP_REG_DISCONNECT);
 }
 
 /**************************************************************************
index e5a2d590a104d5cf7e63c1a7acfb94d7639a51da..15c9fe766071a392cbaf49b329c60188cc3e6484 100644 (file)
@@ -1061,10 +1061,10 @@ int scsi_init_io(struct scsi_cmnd *cmd)
        struct scsi_device *sdev = cmd->device;
        struct request *rq = cmd->request;
        bool is_mq = (rq->mq_ctx != NULL);
-       int error;
+       int error = BLKPREP_KILL;
 
        if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
-               return -EINVAL;
+               goto err_exit;
 
        error = scsi_init_sgtable(rq, &cmd->sdb);
        if (error)
index fcfeddc79331bbf32a71e296cf606513ae5b3d78..35ad5e8a31ab3b7214cdfb5b57be90c3c7f9d306 100644 (file)
@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
 
 #define READ_CAPACITY_RETRIES_ON_RESET 10
 
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+       u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+       if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+               return false;
+
+       return true;
+}
+
 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
                                                unsigned char *buffer)
 {
@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
                return -ENODEV;
        }
 
-       if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+       if (!sd_addressable_capacity(lba, sector_size)) {
                sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
                        "kernel compiled with support for large block "
                        "devices.\n");
@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
                return sector_size;
        }
 
-       if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+       if (!sd_addressable_capacity(lba, sector_size)) {
                sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
                        "kernel compiled with support for large block "
                        "devices.\n");
@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
                q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
                rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
        } else
-               rw_max = BLK_DEF_MAX_SECTORS;
+               rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+                                     (sector_t)BLK_DEF_MAX_SECTORS);
 
        /* Combine with controller limits */
        q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
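sd_addressable_capacity() converts the last LBA into 512-byte sectors by shifting with ilog2(sector_size) - 9 and compares the result against U32_MAX when sector_t is 32 bits wide: with large logical blocks the raw LBA can fit in 32 bits while the 512-byte sector count does not, which the old "lba >= 0xffffffff" test missed. A runnable userspace mirror of the check (a sketch that assumes sector_size >= 512; the kernel uses ilog2() and sizeof(sector_t) directly):

#include <stdio.h>
#include <stdint.h>

/* userspace stand-in for sd_addressable_capacity() on 32-bit sector_t */
static int addressable(uint64_t lba, unsigned int sector_size)
{
        unsigned int log2_size = 0;     /* ilog2(sector_size) */

        while ((1u << (log2_size + 1)) <= sector_size)
                log2_size++;
        return ((lba + 1ULL) << (log2_size - 9)) <= UINT32_MAX;
}

int main(void)
{
        uint64_t lba = 0x3fffffffULL;   /* fits in 32 bits ... */

        /* ... but 4 KiB blocks make it 0x200000000 512-byte sectors */
        printf("lba <= U32_MAX: %d\n", lba <= UINT32_MAX);
        printf("addressable:    %d\n", addressable(lba, 4096));
        return 0;
}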
index 0b29b9329b1c2f5c8207188884498f314e0a414a..a8f630213a1a0fce250e23b00a9452676389b159 100644 (file)
@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
        unsigned char *buffer;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
+       unsigned int ms_len = 128;
        int rc, n;
 
        static const char *loadmech[] =
@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
        scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
 
        /* ask for mode page 0x2a */
-       rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+       rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
                             SR_TIMEOUT, 3, &data, NULL);
 
-       if (!scsi_status_is_good(rc)) {
+       if (!scsi_status_is_good(rc) || data.length > ms_len ||
+           data.header_length + data.block_descriptor_length > data.length) {
                /* failed, drive doesn't have capabilities mode page */
                cd->cdi.speed = 1;
                cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
index a91802432f2f47d1b163ba9f8e2da90dabe28e62..e3f9ed3690b7a86103472de987c03fd76becd59b 100644 (file)
@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
 
 int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
-       iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-       return 0;
+       return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 EXPORT_SYMBOL(iscsit_queue_rsp);
 
index bf40f03755ddc50697652ccde864d40df840fa0b..344e8448869c15c9e078ef708debf98cac1aae1a 100644 (file)
@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
 static int lio_queue_data_in(struct se_cmd *se_cmd)
 {
        struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsi_conn *conn = cmd->conn;
 
        cmd->i_state = ISTATE_SEND_DATAIN;
-       cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
-
-       return 0;
+       return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
 }
 
 static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
 static int lio_queue_status(struct se_cmd *se_cmd)
 {
        struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsi_conn *conn = cmd->conn;
 
        cmd->i_state = ISTATE_SEND_STATUS;
 
        if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
-               iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-               return 0;
+               return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
        }
-       cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
-
-       return 0;
+       return conn->conn_transport->iscsit_queue_status(conn, cmd);
 }
 
 static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
index e65bf78ceef3740fc1923c1b3ed446aa2996b82d..fce627628200cf9917a8212da7a17ccff3d6136c 100644 (file)
@@ -781,22 +781,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
        } else if (IS_TYPE_NUMBER(param)) {
                if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
                        SET_PSTATE_REPLY_OPTIONAL(param);
-               /*
-                * The GlobalSAN iSCSI Initiator for MacOSX does
-                * not respond to MaxBurstLength, FirstBurstLength,
-                * DefaultTime2Wait or DefaultTime2Retain parameter keys.
-                * So, we set them to 'reply optional' here, and assume the
-                * the defaults from iscsi_parameters.h if the initiator
-                * is not RFC compliant and the keys are not negotiated.
-                */
-               if (!strcmp(param->name, MAXBURSTLENGTH))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
-               if (!strcmp(param->name, FIRSTBURSTLENGTH))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
-               if (!strcmp(param->name, DEFAULTTIME2WAIT))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
-               if (!strcmp(param->name, DEFAULTTIME2RETAIN))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
                /*
                 * Required for gPXE iSCSI boot client
                 */
index 5041a9c8bdcbfd9bf9eb9368e850bdb8792a6be9..7d3e2fcc26a0da82629102693a99750622afed95 100644 (file)
@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
        }
 }
 
-void iscsit_add_cmd_to_response_queue(
+int iscsit_add_cmd_to_response_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn,
        u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
        if (!qr) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_queue_req\n");
-               return;
+               return -ENOMEM;
        }
        INIT_LIST_HEAD(&qr->qr_list);
        qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
        spin_unlock_bh(&conn->response_queue_lock);
 
        wake_up(&conn->queues_wq);
+       return 0;
 }
 
 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 {
        struct se_cmd *se_cmd = NULL;
        int rc;
+       bool op_scsi = false;
        /*
         * Determine if a struct se_cmd is associated with
         * this struct iscsi_cmd.
         */
        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
-               se_cmd = &cmd->se_cmd;
-               __iscsit_free_cmd(cmd, true, shutdown);
+               op_scsi = true;
                /*
                 * Fallthrough
                 */
        case ISCSI_OP_SCSI_TMFUNC:
-               rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
-               if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
-                       __iscsit_free_cmd(cmd, true, shutdown);
+               se_cmd = &cmd->se_cmd;
+               __iscsit_free_cmd(cmd, op_scsi, shutdown);
+               rc = transport_generic_free_cmd(se_cmd, shutdown);
+               if (!rc && shutdown && se_cmd->se_sess) {
+                       __iscsit_free_cmd(cmd, op_scsi, shutdown);
                        target_put_sess_cmd(se_cmd);
                }
                break;
index 8ff08856516aba68394fc07661ec71b635c8b6a2..9e4197af8708e1056a08f4c01d423581a075bef1 100644 (file)
@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
                        struct iscsi_conn_recovery **, itt_t);
 extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
-extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
 extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
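The theme of the iscsi-target hunks is converting fire-and-forget void helpers into int-returning ones, so the -ENOMEM from the queue-request allocation travels up through lio_queue_data_in()/lio_queue_status() to the transport instead of being logged and lost. Reduced to a sketch with placeholder types (demo_conn, demo_cmd, demo_qr):

/* Before: a void helper could only pr_err() on allocation failure.
 * After: every caller in the chain returns the error upward.
 */
static int demo_queue_rsp(struct demo_conn *conn, struct demo_cmd *cmd)
{
        struct demo_qr *qr = kzalloc(sizeof(*qr), GFP_ATOMIC);

        if (!qr)
                return -ENOMEM;         /* propagated, not swallowed */

        qr->cmd = cmd;
        spin_lock_bh(&conn->response_queue_lock);
        list_add_tail(&qr->qr_list, &conn->response_queue);
        spin_unlock_bh(&conn->response_queue_lock);
        wake_up(&conn->queues_wq);
        return 0;
}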
index fd7c16a7ca6e06ad53e6d6df54ab739550ae4a4a..fc4a9c303d559f95b1216857efd8ae6ce77b96ca 100644 (file)
@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
                /*
                 * Set the ASYMMETRIC ACCESS State
                 */
-               buf[off++] |= (atomic_read(
-                       &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+               buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
                /*
                 * Set supported ASYMMETRIC ACCESS State bits
                 */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
-       out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+       out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 
        // XXX: keeps using tg_pt_gp without reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
 }
 
 /*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
  */
 static int core_alua_update_tpg_primary_metadata(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
                        "alua_access_state=0x%02x\n"
                        "alua_access_status=0x%02x\n",
                        tg_pt_gp->tg_pt_gp_id,
-                       tg_pt_gp->tg_pt_gp_alua_pending_state,
+                       tg_pt_gp->tg_pt_gp_alua_access_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);
 
        snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 }
 
-static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
-{
-       struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-               struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
-       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-       bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
-                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
-
-       /*
-        * Update the ALUA metadata buf that has been allocated in
-        * core_alua_do_port_transition(), this metadata will be written
-        * to struct file.
-        *
-        * Note that there is the case where we do not want to update the
-        * metadata when the saved metadata is being parsed in userspace
-        * when setting the existing port access state and access status.
-        *
-        * Also note that the failure to write out the ALUA metadata to
-        * struct file does NOT affect the actual ALUA transition.
-        */
-       if (tg_pt_gp->tg_pt_gp_write_metadata) {
-               mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
-               core_alua_update_tpg_primary_metadata(tg_pt_gp);
-               mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
-       }
-       /*
-        * Set the current primary ALUA access state to the requested new state
-        */
-       atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-                  tg_pt_gp->tg_pt_gp_alua_pending_state);
-
-       pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
-               " from primary access state %s to %s\n", (explicit) ? "explicit" :
-               "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
-               tg_pt_gp->tg_pt_gp_id,
-               core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
-               core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
-
-       core_alua_queue_state_change_ua(tg_pt_gp);
-
-       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-       atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-
-       if (tg_pt_gp->tg_pt_gp_transition_complete)
-               complete(tg_pt_gp->tg_pt_gp_transition_complete);
-}
-
 static int core_alua_do_transition_tg_pt(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        int new_state,
        int explicit)
 {
-       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-       DECLARE_COMPLETION_ONSTACK(wait);
+       int prev_state;
 
+       mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        /* Nothing to be done here */
-       if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+       if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
+               mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
+       }
 
-       if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
+       if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
+               mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return -EAGAIN;
-
-       /*
-        * Flush any pending transitions
-        */
-       if (!explicit)
-               flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
+       }
 
        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
-       atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-                       ALUA_ACCESS_STATE_TRANSITION);
+       prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+       tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
                                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
 
        core_alua_queue_state_change_ua(tg_pt_gp);
 
-       if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+       if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
+               mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
-
-       tg_pt_gp->tg_pt_gp_alua_previous_state =
-               atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-       tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+       }
 
        /*
         * Check for the optional ALUA primary state transition delay
@@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt(
                msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
 
        /*
-        * Take a reference for workqueue item
+        * Set the current primary ALUA access state to the requested new state
         */
-       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-       atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+       tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
 
-       schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
-       if (explicit) {
-               tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-               wait_for_completion(&wait);
-               tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+       /*
+        * Update the ALUA metadata buffer that was allocated in
+        * core_alua_do_port_transition(); this metadata will be written
+        * to struct file.
+        *
+        * Note that there is the case where we do not want to update the
+        * metadata when the saved metadata is being parsed in userspace
+        * when setting the existing port access state and access status.
+        *
+        * Also note that the failure to write out the ALUA metadata to
+        * struct file does NOT affect the actual ALUA transition.
+        */
+       if (tg_pt_gp->tg_pt_gp_write_metadata) {
+               core_alua_update_tpg_primary_metadata(tg_pt_gp);
        }
 
+       pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+               " from primary access state %s to %s\n", (explicit) ? "explicit" :
+               "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+               tg_pt_gp->tg_pt_gp_id,
+               core_alua_dump_state(prev_state),
+               core_alua_dump_state(new_state));
+
+       core_alua_queue_state_change_ua(tg_pt_gp);
+
+       mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        return 0;
 }
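
[Editor's note, not part of the patch: the conversion above drops the
tg_pt_gp_transition_work workqueue and the on-stack completion in favour of a
single tg_pt_gp_transition_mutex held across the whole transition. A minimal
userspace sketch of the same pattern, with a pthread mutex and made-up state
names standing in for the ALUA ones:]

    #include <pthread.h>
    #include <stdio.h>

    enum { STATE_OPTIMIZED, STATE_TRANSITION, STATE_STANDBY };

    static pthread_mutex_t transition_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int access_state = STATE_OPTIMIZED;

    /* One mutex serializes the whole transition: the state no longer
     * needs to be atomic and no worker/completion handshake is needed. */
    static int do_transition(int new_state)
    {
        int prev;

        pthread_mutex_lock(&transition_mutex);
        if (access_state == new_state) {        /* nothing to be done */
            pthread_mutex_unlock(&transition_mutex);
            return 0;
        }
        prev = access_state;
        access_state = STATE_TRANSITION;        /* intermediate state */
        /* ... optional delay and metadata update go here ... */
        access_state = new_state;
        printf("transition %d -> %d\n", prev, new_state);
        pthread_mutex_unlock(&transition_mutex);
        return 0;
    }

    int main(void)
    {
        do_transition(STATE_STANDBY);
        return 0;
    }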
 
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
        }
        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
-       mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+       mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-       INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-                 core_alua_do_transition_tg_pt_work);
        tg_pt_gp->tg_pt_gp_dev = dev;
-       atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-               ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
+       tg_pt_gp->tg_pt_gp_alua_access_state =
+                       ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
        /*
         * Enable both explicit and implicit ALUA support by default
         */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
        dev->t10_alua.alua_tg_pt_gps_counter--;
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-       flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
-
        /*
         * Allow a struct t10_alua_tg_pt_gp_member * referenced by
         * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
                        "Primary Access Status: %s\nTG Port Secondary Access"
                        " State: %s\nTG Port Secondary Access Status: %s\n",
                        config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
-                       core_alua_dump_state(atomic_read(
-                                       &tg_pt_gp->tg_pt_gp_alua_access_state)),
+                       core_alua_dump_state(
+                               tg_pt_gp->tg_pt_gp_alua_access_state),
                        core_alua_dump_status(
                                tg_pt_gp->tg_pt_gp_alua_access_status),
                        atomic_read(&lun->lun_tg_pt_secondary_offline) ?
index 38b5025e4c7a877f9e5c0bcfa6995262b6330e32..70657fd564406b3c137b02c3f61824f387f554aa 100644 (file)
@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
                char *page)
 {
        return sprintf(page, "%d\n",
-               atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));
+                      to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
 }
 
 static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
index d8a16ca6baa507b235cbec2fbff56a648874fd2e..d1e6cab8e3d3f0a95cb801f2680c2b8e2474de1d 100644 (file)
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
                pr_err("Source se_lun->lun_se_dev does not exist\n");
                return -EINVAL;
        }
+       if (lun->lun_shutdown) {
+               pr_err("Unable to create mappedlun symlink because"
+                       " lun->lun_shutdown=true\n");
+               return -EINVAL;
+       }
        se_tpg = lun->lun_tpg;
 
        nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
index 6fb191914f458f7889508652e19b860355387491..dfaef4d3b2d2698088754c08f1846bc893815237 100644 (file)
@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
+       lun->lun_shutdown = true;
+
        core_clear_lun_from_tpg(lun, tpg);
        /*
         * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@ void core_tpg_remove_lun(
        }
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_del_rcu(&lun->link);
+
+       lun->lun_shutdown = false;
        mutex_unlock(&tpg->tpg_lun_mutex);
 
        percpu_ref_exit(&lun->lun_ref);
index b1a3cdb29468cf84e7eb48d6c8c41934c0b5b4cb..a0cd56ee5fe984f7ddf27c41157f7314343d23c1 100644 (file)
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-               struct se_device *dev);
+               struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
 
                if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
                        transport_write_pending_qf(cmd);
-               else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+               else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+                        cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
                        transport_complete_qf(cmd);
        }
 }
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
                }
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                goto check_stop;
        default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        }
 
        ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-       if (ret == -EAGAIN || ret == -ENOMEM)
+       if (ret)
                goto queue_full;
 
 check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
        return;
 
 queue_full:
-       cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
        int ret = 0;
 
        transport_complete_task_attr(cmd);
+       /*
+        * If a fabric driver ->write_pending() or ->queue_data_in() callback
+        * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
+        * the same callbacks should not be retried.  Return CHECK_CONDITION
+        * if a scsi_status is not already set.
+        *
+        * If a fabric driver ->queue_status() has returned non-zero, always
+        * keep retrying no matter what.
+        */
+       if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+               if (cmd->scsi_status)
+                       goto queue_status;
 
-       if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-               trace_target_cmd_complete(cmd);
-               ret = cmd->se_tfo->queue_status(cmd);
-               goto out;
+               cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+               cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+               cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
+               translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+               goto queue_status;
        }
 
+       if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+               goto queue_status;
+
        switch (cmd->data_direction) {
        case DMA_FROM_DEVICE:
                if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
                break;
        }
 
-out:
        if (ret < 0) {
-               transport_handle_queue_full(cmd, cmd->se_dev);
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
                return;
        }
        transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-       struct se_cmd *cmd,
-       struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+                                       int err, bool write_pending)
 {
+       /*
+        * -EAGAIN or -ENOMEM signals a retry of the ->write_pending() and/or
+        * ->queue_data_in() callbacks from a new process context.
+        *
+        * Otherwise, for any other error, transport_complete_qf() will send
+        * CHECK_CONDITION via ->queue_status() instead of attempting to
+        * retry associated fabric driver data-transfer callbacks.
+        */
+       if (err == -EAGAIN || err == -ENOMEM) {
+               cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+                                                TRANSPORT_COMPLETE_QF_OK;
+       } else {
+               pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+               cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+       }
+
        spin_lock_irq(&dev->qf_cmd_lock);
        list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
        atomic_inc_mb(&dev->dev_qf_count);
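
[Editor's note, not part of the patch: with the new err/write_pending
arguments, transport_handle_queue_full() becomes a small classifier from
fabric return codes to queue-full states. A self-contained sketch of that
decision, using toy constants rather than the kernel's t_state enums:]

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for TRANSPORT_COMPLETE_QF_{OK,WP,ERR}. */
    enum { QF_OK, QF_WP, QF_ERR };

    static int classify_qf(int err, bool write_pending)
    {
        /* Only -EAGAIN/-ENOMEM mean "retry the same fabric callback". */
        if (err == -EAGAIN || err == -ENOMEM)
            return write_pending ? QF_WP : QF_OK;
        /* Anything else is treated as fatal: transport_complete_qf()
         * sends CHECK_CONDITION instead of retrying. */
        return QF_ERR;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               classify_qf(-ENOMEM, false),     /* 0: QF_OK  */
               classify_qf(-EAGAIN, true),      /* 1: QF_WP  */
               classify_qf(-EIO, false));       /* 2: QF_ERR */
        return 0;
    }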
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
                WARN_ON(!cmd->scsi_status);
                ret = transport_send_check_condition_and_sense(
                                        cmd, 0, 1);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
 
                transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
                } else if (rc) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                rc, 0);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
 
                        transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
                if (target_read_prot_action(cmd)) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                cmd->pi_err, 0);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
 
                        transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
 
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_data_in(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                break;
        case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
                        atomic_long_add(cmd->data_length,
                                        &cmd->se_lun->lun_stats.tx_data_octets);
                        ret = cmd->se_tfo->queue_data_in(cmd);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
                        break;
                }
@@ -2166,7 +2197,7 @@ queue_rsp:
 queue_status:
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                break;
        default:
@@ -2180,8 +2211,8 @@ queue_status:
 queue_full:
        pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
                " data_direction: %d\n", cmd, cmd->data_direction);
-       cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        ret = cmd->se_tfo->write_pending(cmd);
-       if (ret == -EAGAIN || ret == -ENOMEM)
+       if (ret)
                goto queue_full;
 
-       /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-       WARN_ON(ret);
-
-       return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return 0;
 
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-       cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
        return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
        int ret;
 
        ret = cmd->se_tfo->write_pending(cmd);
-       if (ret == -EAGAIN || ret == -ENOMEM) {
+       if (ret) {
                pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
                         cmd);
-               transport_handle_queue_full(cmd, cmd->se_dev);
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
        }
 }
 
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
 {
+       int ret;
+
        assert_spin_locked(&cmd->t_state_lock);
        WARN_ON_ONCE(!irqs_disabled());
 
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        trace_target_cmd_complete(cmd);
 
        spin_unlock_irq(&cmd->t_state_lock);
-       cmd->se_tfo->queue_status(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (ret)
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
        spin_lock_irq(&cmd->t_state_lock);
 
        return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
        unsigned long flags;
+       int ret;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
                 cmd->t_task_cdb[0], cmd->tag);
 
        trace_target_cmd_complete(cmd);
-       cmd->se_tfo->queue_status(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (ret)
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)
index c6874c38a10bc45e86beae58ddfed175664d51cf..f615c3bbb73e8b7a2a7bf3f5039efd84c724cdf2 100644 (file)
@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
                   DATA_BLOCK_BITS);
 }
 
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
-               struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+                            bool bidi)
 {
+       struct se_cmd *se_cmd = cmd->se_cmd;
        int i, block;
        int block_remaining = 0;
        void *from, *to;
        size_t copy_bytes, from_offset;
-       struct scatterlist *sg;
+       struct scatterlist *sg, *data_sg;
+       unsigned int data_nents;
+       DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+       bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+       if (!bidi) {
+               data_sg = se_cmd->t_data_sg;
+               data_nents = se_cmd->t_data_nents;
+       } else {
+               uint32_t count;
+
+               /*
+                * In the bidi case, the first count blocks belong to the
+                * Data-Out buffer, so they must be discarded before
+                * gathering the Data-In buffer.
+                */
+               count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+               while (count--) {
+                       block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+                       clear_bit(block, bitmap);
+               }
+
+               data_sg = se_cmd->t_bidi_data_sg;
+               data_nents = se_cmd->t_bidi_data_nents;
+       }
 
        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                to = kmap_atomic(sg_page(sg)) + sg->offset;
                while (sg_remaining > 0) {
                        if (block_remaining == 0) {
-                               block = find_first_bit(cmd_bitmap,
+                               block = find_first_bit(bitmap,
                                                DATA_BLOCK_BITS);
                                block_remaining = DATA_BLOCK_SIZE;
-                               clear_bit(block, cmd_bitmap);
+                               clear_bit(block, bitmap);
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
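
[Editor's note, not part of the patch: the Data-Out skip above amounts to
"clear the first count set bits of the block bitmap". A self-contained
sketch of the same walk, using one 64-bit word in place of the kernel's
DATA_BLOCK_BITS-sized bitmap:]

    #include <stdint.h>
    #include <stdio.h>

    /* Skip the first `count` set bits (the Data-Out blocks) so that the
     * next lookup lands on the first Data-In block, mirroring the
     * find_first_bit()/clear_bit() loop in gather_data_area(). */
    static void skip_blocks(uint64_t *bitmap, unsigned count)
    {
        while (count--) {
            int block = __builtin_ctzll(*bitmap);   /* first set bit */
            *bitmap &= ~(1ULL << block);            /* clear_bit()   */
        }
    }

    int main(void)
    {
        uint64_t map = 0xF0FULL;    /* blocks 0-3 and 8-11 in use */
        skip_blocks(&map, 4);       /* drop the 4 Data-Out blocks */
        printf("first Data-In block: %d\n", __builtin_ctzll(map)); /* 8 */
        return 0;
    }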
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
        return true;
 }
 
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+       struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+       size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+               data_length += round_up(se_cmd->t_bidi_data_sg->length,
+                               DATA_BLOCK_SIZE);
+       }
+
+       return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+       size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+       return data_length / DATA_BLOCK_SIZE;
+}
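
[Editor's note, not part of the patch: the two helpers above pad each
buffer to whole DATA_BLOCK_SIZE blocks before deriving the iov/block count.
A quick arithmetic sketch, assuming 4 KiB blocks for illustration:]

    #include <stdio.h>

    #define DATA_BLOCK_SIZE 4096    /* assumed block size for the sketch */

    /* round_up() as used by tcmu_cmd_get_data_length(): both the Data-Out
     * and (for BIDI) Data-In lengths are padded to whole blocks before
     * the block count is derived. */
    static size_t round_up_blocks(size_t len)
    {
        return (len + DATA_BLOCK_SIZE - 1) / DATA_BLOCK_SIZE
               * DATA_BLOCK_SIZE;
    }

    int main(void)
    {
        size_t data = round_up_blocks(6000);    /* -> 8192 */
        size_t bidi = round_up_blocks(512);     /* -> 4096 */

        printf("total %zu, blocks %zu\n", data + bidi,
               (data + bidi) / DATA_BLOCK_SIZE);        /* 12288, 3 */
        return 0;
    }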
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        uint32_t cmd_head;
        uint64_t cdb_off;
        bool copy_to_data_area;
-       size_t data_length;
+       size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
        DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
         * expensive to tell how many regions are freed in the bitmap
        */
        base_command_size = max(offsetof(struct tcmu_cmd_entry,
-                               req.iov[se_cmd->t_bidi_data_nents +
-                                       se_cmd->t_data_nents]),
+                               req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
                                sizeof(struct tcmu_cmd_entry));
        command_size = base_command_size
                + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
        mb = udev->mb_addr;
        cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-       data_length = se_cmd->data_length;
-       if (se_cmd->se_cmd_flags & SCF_BIDI) {
-               BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-               data_length += se_cmd->t_bidi_data_sg->length;
-       }
        if ((command_size > (udev->cmdr_size / 2)) ||
            data_length > udev->data_size) {
                pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        entry->req.iov_dif_cnt = 0;
 
        /* Handle BIDI commands */
-       iov_cnt = 0;
-       alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
-               se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
-       entry->req.iov_bidi_cnt = iov_cnt;
-
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               iov_cnt = 0;
+               iov++;
+               alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+                               se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+                               false);
+               entry->req.iov_bidi_cnt = iov_cnt;
+       }
        /* cmd's data_bitmap is what changed in process */
        bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
                        DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                               se_cmd->scsi_sense_length);
                free_data_area(udev, cmd);
        } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-               DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
                /* Get Data-In buffer before clean up */
-               bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-               gather_data_area(udev, bitmap,
-                       se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+               gather_data_area(udev, cmd, true);
                free_data_area(udev, cmd);
        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-               DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
-               bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-               gather_data_area(udev, bitmap,
-                       se_cmd->t_data_sg, se_cmd->t_data_nents);
+               gather_data_area(udev, cmd, false);
                free_data_area(udev, cmd);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
        if (ret < 0)
                return ret;
 
-       if (!val) {
-               pr_err("Illegal value for cmd_time_out\n");
-               return -EINVAL;
-       }
-
        udev->cmd_time_out = val * MSEC_PER_SEC;
        return count;
 }
index b0500a0a87b86161b8cf8befcee9753ff6cda74d..e4603b09863a8fa5ffd4467e81a463538f0533bb 100644 (file)
@@ -491,6 +491,41 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
        tty_ldisc_debug(tty, "%p: closed\n", ld);
 }
 
+/**
+ *     tty_ldisc_restore       -       helper for tty ldisc change
+ *     @tty: tty to recover
+ *     @old: previous ldisc
+ *
+ *     Restore the previous line discipline or N_TTY when a line discipline
+ *     change fails due to an open error
+ */
+
+static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
+{
+       struct tty_ldisc *new_ldisc;
+       int r;
+
+       /* There is an outstanding reference here so this is safe */
+       old = tty_ldisc_get(tty, old->ops->num);
+       WARN_ON(IS_ERR(old));
+       tty->ldisc = old;
+       tty_set_termios_ldisc(tty, old->ops->num);
+       if (tty_ldisc_open(tty, old) < 0) {
+               tty_ldisc_put(old);
+               /* This driver is always present */
+               new_ldisc = tty_ldisc_get(tty, N_TTY);
+               if (IS_ERR(new_ldisc))
+                       panic("n_tty: get");
+               tty->ldisc = new_ldisc;
+               tty_set_termios_ldisc(tty, N_TTY);
+               r = tty_ldisc_open(tty, new_ldisc);
+               if (r < 0)
+                       panic("Couldn't open N_TTY ldisc for "
+                             "%s --- error %d.",
+                             tty_name(tty), r);
+       }
+}
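
[Editor's note, not part of the patch: tty_ldisc_restore() encodes a simple
recovery ladder, reopen the old discipline, else fall back to N_TTY, else
panic. A toy userspace model of that ladder; open_ldisc() is a made-up
stand-in for tty_ldisc_get()+tty_ldisc_open():]

    #include <stdio.h>
    #include <stdlib.h>

    static int open_ldisc(int num)
    {
        return num == 0 ? 0 : -1;   /* pretend only N_TTY (0) opens */
    }

    static void restore_ldisc(int old_num)
    {
        if (open_ldisc(old_num) == 0) {
            printf("restored ldisc %d\n", old_num);
            return;
        }
        if (open_ldisc(0) == 0) {   /* N_TTY is always present */
            printf("fell back to N_TTY\n");
            return;
        }
        fprintf(stderr, "cannot open N_TTY\n");
        abort();                    /* the kernel would panic() here */
    }

    int main(void)
    {
        restore_ldisc(2);
        return 0;
    }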
+
 /**
  *     tty_set_ldisc           -       set line discipline
  *     @tty: the terminal to set
@@ -504,7 +539,12 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 
 int tty_set_ldisc(struct tty_struct *tty, int disc)
 {
-       int retval, old_disc;
+       int retval;
+       struct tty_ldisc *old_ldisc, *new_ldisc;
+
+       new_ldisc = tty_ldisc_get(tty, disc);
+       if (IS_ERR(new_ldisc))
+               return PTR_ERR(new_ldisc);
 
        tty_lock(tty);
        retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -517,8 +557,7 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
        }
 
        /* Check the no-op case */
-       old_disc = tty->ldisc->ops->num;
-       if (old_disc == disc)
+       if (tty->ldisc->ops->num == disc)
                goto out;
 
        if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -527,25 +566,34 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
                goto out;
        }
 
-       retval = tty_ldisc_reinit(tty, disc);
+       old_ldisc = tty->ldisc;
+
+       /* Shutdown the old discipline. */
+       tty_ldisc_close(tty, old_ldisc);
+
+       /* Now set up the new line discipline. */
+       tty->ldisc = new_ldisc;
+       tty_set_termios_ldisc(tty, disc);
+
+       retval = tty_ldisc_open(tty, new_ldisc);
        if (retval < 0) {
                /* Back to the old one or N_TTY if we can't */
-               if (tty_ldisc_reinit(tty, old_disc) < 0) {
-                       pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
-                       if (tty_ldisc_reinit(tty, N_TTY) < 0) {
-                               /* At this point we have tty->ldisc == NULL. */
-                               pr_err("tty: reinitializing N_TTY failed\n");
-                       }
-               }
+               tty_ldisc_put(new_ldisc);
+               tty_ldisc_restore(tty, old_ldisc);
        }
 
-       if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
-           tty->ops->set_ldisc) {
+       if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
                down_read(&tty->termios_rwsem);
                tty->ops->set_ldisc(tty);
                up_read(&tty->termios_rwsem);
        }
 
+       /* At this point we hold a reference to the new ldisc and a
+          reference to the old ldisc, or we hold two references to
+          the old ldisc (if it was restored as part of error cleanup
+          above). In either case, releasing a single reference from
+          the old ldisc is correct. */
+       new_ldisc = old_ldisc;
 out:
        tty_ldisc_unlock(tty);
 
@@ -553,6 +601,7 @@ out:
           already running */
        tty_buffer_restart_work(tty->port);
 err:
+       tty_ldisc_put(new_ldisc);       /* drop the extra reference */
        tty_unlock(tty);
        return retval;
 }
@@ -613,8 +662,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
        int retval;
 
        ld = tty_ldisc_get(tty, disc);
-       if (IS_ERR(ld))
+       if (IS_ERR(ld)) {
+               BUG_ON(disc == N_TTY);
                return PTR_ERR(ld);
+       }
 
        if (tty->ldisc) {
                tty_ldisc_close(tty, tty->ldisc);
@@ -626,8 +677,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
        tty_set_termios_ldisc(tty, disc);
        retval = tty_ldisc_open(tty, tty->ldisc);
        if (retval) {
-               tty_ldisc_put(tty->ldisc);
-               tty->ldisc = NULL;
+               if (!WARN_ON(disc == N_TTY)) {
+                       tty_ldisc_put(tty->ldisc);
+                       tty->ldisc = NULL;
+               }
        }
        return retval;
 }
index d2351139342f6200209078e769e04f5ea1eb2d1f..a82e2bd5ea34d97996cb79ba5b72d09aed86e09a 100644 (file)
@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
        usb_ep_free_request(fu->ep_in, fu->bot_req_in);
        usb_ep_free_request(fu->ep_out, fu->bot_req_out);
        usb_ep_free_request(fu->ep_out, fu->cmd.req);
-       usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+       usb_ep_free_request(fu->ep_in, fu->bot_status.req);
 
        kfree(fu->cmd.buf);
 
index d7efcb632f7d9dde08b0a494c455725b05d55af0..002f1ce22bd02032062924b19d645ebc25302c93 100644 (file)
@@ -297,14 +297,15 @@ static int pwm_backlight_probe(struct platform_device *pdev)
        }
 
        /*
-        * If the GPIO is configured as input, change the direction to output
-        * and set the GPIO as active.
+        * If the GPIO is not known to be already configured as output, that
+        * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL,
+        * change the direction to output and set the GPIO as active.
         * Do not force the GPIO to active when it was already output as it
         * could cause backlight flickering or we would enable the backlight too
         * early. Leave the decision of the initial backlight state for later.
         */
        if (pb->enable_gpio &&
-           gpiod_get_direction(pb->enable_gpio) == GPIOF_DIR_IN)
+           gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT)
                gpiod_direction_output(pb->enable_gpio, 1);
 
        pb->power_supply = devm_regulator_get(&pdev->dev, "power");
index 8c4dc1e1f94fdb53ad7acf2808fa400c03787857..b827a8113e26803d8caee69e4ea59d6ceae56ea0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/efi.h>
 #include <linux/errno.h>
 #include <linux/fb.h>
+#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
 #include <video/vga.h>
@@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = {
 };
 ATTRIBUTE_GROUPS(efifb);
 
+static bool pci_dev_disabled;  /* FB base matches BAR of a disabled device */
+
 static int efifb_probe(struct platform_device *dev)
 {
        struct fb_info *info;
@@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
        unsigned int size_total;
        char *option = NULL;
 
-       if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+       if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
                return -ENODEV;
 
        if (fb_get_options("efifb", &option))
@@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = {
 };
 
 builtin_platform_driver(efifb_driver);
+
+#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
+
+static bool pci_bar_found;     /* did we find a BAR matching the efifb base? */
+
+static void claim_efifb_bar(struct pci_dev *dev, int idx)
+{
+       u16 word;
+
+       pci_bar_found = true;
+
+       pci_read_config_word(dev, PCI_COMMAND, &word);
+       if (!(word & PCI_COMMAND_MEMORY)) {
+               pci_dev_disabled = true;
+               dev_err(&dev->dev,
+                       "BAR %d: assigned to efifb but device is disabled!\n",
+                       idx);
+               return;
+       }
+
+       if (pci_claim_resource(dev, idx)) {
+               pci_dev_disabled = true;
+               dev_err(&dev->dev,
+                       "BAR %d: failed to claim resource for efifb!\n", idx);
+               return;
+       }
+
+       dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
+}
+
+static void efifb_fixup_resources(struct pci_dev *dev)
+{
+       u64 base = screen_info.lfb_base;
+       u64 size = screen_info.lfb_size;
+       int i;
+
+       if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+               return;
+
+       if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+               base |= (u64)screen_info.ext_lfb_base << 32;
+
+       if (!base)
+               return;
+
+       for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+               struct resource *res = &dev->resource[i];
+
+               if (!(res->flags & IORESOURCE_MEM))
+                       continue;
+
+               if (res->start <= base && res->end >= base + size - 1) {
+                       claim_efifb_bar(dev, i);
+                       break;
+               }
+       }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
+                              16, efifb_fixup_resources);
+
+#endif
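
[Editor's note, not part of the patch: the fixup claims a BAR only when it
fully contains the framebuffer, after assembling the 64-bit base from
lfb_base and ext_lfb_base. A sketch of both computations with made-up
values:]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The containment test from efifb_fixup_resources(): a BAR claims the
     * framebuffer only if it covers [base, base + size - 1] entirely. */
    static bool bar_covers(uint64_t start, uint64_t end,
                           uint64_t base, uint64_t size)
    {
        return start <= base && end >= base + size - 1;
    }

    int main(void)
    {
        /* hypothetical framebuffer above 4 GiB, 8 MiB in size */
        uint32_t lfb_base = 0x90000000u, ext_lfb_base = 0x1u;
        uint64_t base = (uint64_t)ext_lfb_base << 32 | lfb_base;
        uint64_t size = 8 << 20;

        printf("base=%#llx covered=%d\n", (unsigned long long)base,
               bar_covers(0x190000000ULL, 0x19fffffffULL, base, size));
        return 0;
    }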
index 1abba07b84b3efd014b28bfd0ef61f3cad7cbeda..f4cbfb3b8a0980030e2a0c2bbd78348968d6b8a1 100644 (file)
@@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
        return 0;
 }
 
-static void check_required_callbacks(struct omapfb_device *fbdev)
-{
-#define _C(x) (fbdev->ctrl->x != NULL)
-#define _P(x) (fbdev->panel->x != NULL)
-       BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
-       BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
-                _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
-                _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
-                _P(get_caps)));
-#undef _P
-#undef _C
-}
-
 /*
  * Called by LDM binding to probe and attach a new device.
  * Initialization sequence:
@@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev,
                omapfb_ops.fb_mmap = omapfb_mmap;
        init_state++;
 
-       check_required_callbacks(fbdev);
-
        r = planes_init(fbdev);
        if (r)
                goto cleanup;
index bd017b57c47f8af4ff1558cedaa8589a5f0ce9ff..f599520374ddf575bba1236b81bec2c4c2d21c49 100644 (file)
@@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client,
 
        par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
        if (IS_ERR(par->vbat_reg)) {
-               dev_err(&client->dev, "failed to get VBAT regulator: %ld\n",
-                       PTR_ERR(par->vbat_reg));
                ret = PTR_ERR(par->vbat_reg);
-               goto fb_alloc_error;
+               if (ret == -ENODEV) {
+                       par->vbat_reg = NULL;
+               } else {
+                       dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
+                               ret);
+                       goto fb_alloc_error;
+               }
        }
 
        if (of_property_read_u32(node, "solomon,width", &par->width))
@@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client,
                udelay(4);
        }
 
-       ret = regulator_enable(par->vbat_reg);
-       if (ret) {
-               dev_err(&client->dev, "failed to enable VBAT: %d\n", ret);
-               goto reset_oled_error;
+       if (par->vbat_reg) {
+               ret = regulator_enable(par->vbat_reg);
+               if (ret) {
+                       dev_err(&client->dev, "failed to enable VBAT: %d\n",
+                               ret);
+                       goto reset_oled_error;
+               }
        }
 
        ret = ssd1307fb_init(par);
@@ -710,7 +717,8 @@ panel_init_error:
                pwm_put(par->pwm);
        };
 regulator_enable_error:
-       regulator_disable(par->vbat_reg);
+       if (par->vbat_reg)
+               regulator_disable(par->vbat_reg);
 reset_oled_error:
        fb_deferred_io_cleanup(info);
 fb_alloc_error:
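
[Editor's note, not part of the patch: the probe change above follows the
usual optional-regulator idiom, -ENODEV means the supply simply is not
wired up, so the handle becomes NULL and every later enable/disable is
guarded; any other error stays fatal. A toy sketch of the idiom, where
get_optional_supply() is a hypothetical stand-in for
devm_regulator_get_optional():]

    #include <errno.h>
    #include <stdio.h>

    static int get_optional_supply(int present, void **handle)
    {
        static int dummy;       /* stands in for a real regulator */

        if (!present) {
            *handle = NULL;
            return -ENODEV;     /* not an error: supply is optional */
        }
        *handle = &dummy;
        return 0;
    }

    int main(void)
    {
        void *vbat;
        int ret = get_optional_supply(0, &vbat);

        if (ret == -ENODEV)
            ret = 0;            /* board has no VBAT: carry on */
        else if (ret)
            return ret;         /* real failure */

        if (vbat)
            printf("enable VBAT\n");
        else
            printf("no VBAT regulator, skipping\n");
        return ret;
    }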
index d0115a7af0a9ae0f3171a66bf716f0f21c3e91b1..3ee309c50b2d015fb3d463dd88f6b99253beea12 100644 (file)
@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateInitWait:
-InitWait:
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
 
@@ -654,7 +653,8 @@ InitWait:
                 * get Connected twice here.
                 */
                if (dev->state != XenbusStateConnected)
-                       goto InitWait; /* no InitWait seen yet, fudge it */
+                       /* no InitWait seen yet, fudge it */
+                       xenbus_switch_state(dev, XenbusStateConnected);
 
                if (xenbus_read_unsigned(info->xbdev->otherend,
                                         "request-update", 0))
index 400d70b6937948cc5a8aac698efc64fcf49c1dde..48230a5e12f262b67d28d87adc713f462e8ec5fc 100644 (file)
@@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d)
                if (device_features & (1ULL << i))
                        __virtio_set_bit(dev, i);
 
+       if (drv->validate) {
+               err = drv->validate(dev);
+               if (err)
+                       goto err;
+       }
+
        err = virtio_finalize_features(dev);
        if (err)
                goto err;
index 590534910dc617836e18c91b6576410a0299de26..698d5d06fa039ca1a27b151a3dcf5d322784e3f0 100644 (file)
@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;
 
-       synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0));
-       for (i = 1; i < vp_dev->msix_vectors; i++)
+       if (vp_dev->intx_enabled)
+               synchronize_irq(vp_dev->pci_dev->irq);
+
+       for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
@@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
 static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
 {
        struct virtio_pci_device *vp_dev = opaque;
+       struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
-       struct virtqueue *vq;
+       unsigned long flags;
 
-       list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
-               if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
+       spin_lock_irqsave(&vp_dev->lock, flags);
+       list_for_each_entry(info, &vp_dev->virtqueues, node) {
+               if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
+       spin_unlock_irqrestore(&vp_dev->lock, flags);
 
        return ret;
 }
@@ -97,186 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
        return vp_vring_interrupt(irq, opaque);
 }
 
-static void vp_remove_vqs(struct virtio_device *vdev)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+                                  bool per_vq_vectors, struct irq_affinity *desc)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       struct virtqueue *vq, *n;
+       const char *name = dev_name(&vp_dev->vdev.dev);
+       unsigned i, v;
+       int err = -ENOMEM;
 
-       list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
-               if (vp_dev->msix_vector_map) {
-                       int v = vp_dev->msix_vector_map[vq->index];
+       vp_dev->msix_vectors = nvectors;
 
-                       if (v != VIRTIO_MSI_NO_VECTOR)
-                               free_irq(pci_irq_vector(vp_dev->pci_dev, v),
-                                       vq);
-               }
-               vp_dev->del_vq(vq);
+       vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+                                    GFP_KERNEL);
+       if (!vp_dev->msix_names)
+               goto error;
+       vp_dev->msix_affinity_masks
+               = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+                         GFP_KERNEL);
+       if (!vp_dev->msix_affinity_masks)
+               goto error;
+       for (i = 0; i < nvectors; ++i)
+               if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+                                       GFP_KERNEL))
+                       goto error;
+
+       err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+                                            nvectors, PCI_IRQ_MSIX |
+                                            (desc ? PCI_IRQ_AFFINITY : 0),
+                                            desc);
+       if (err < 0)
+               goto error;
+       vp_dev->msix_enabled = 1;
+
+       /* Set the vector used for configuration */
+       v = vp_dev->msix_used_vectors;
+       snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+                "%s-config", name);
+       err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+                         vp_config_changed, 0, vp_dev->msix_names[v],
+                         vp_dev);
+       if (err)
+               goto error;
+       ++vp_dev->msix_used_vectors;
+
+       v = vp_dev->config_vector(vp_dev, v);
+       /* Verify we had enough resources to assign the vector */
+       if (v == VIRTIO_MSI_NO_VECTOR) {
+               err = -EBUSY;
+               goto error;
        }
+
+       if (!per_vq_vectors) {
+               /* Shared vector for all VQs */
+               v = vp_dev->msix_used_vectors;
+               snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+                        "%s-virtqueues", name);
+               err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+                                 vp_vring_interrupt, 0, vp_dev->msix_names[v],
+                                 vp_dev);
+               if (err)
+                       goto error;
+               ++vp_dev->msix_used_vectors;
+       }
+       return 0;
+error:
+       return err;
+}
+
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+                                    void (*callback)(struct virtqueue *vq),
+                                    const char *name,
+                                    u16 msix_vec)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
+       struct virtqueue *vq;
+       unsigned long flags;
+
+       /* fill out our structure that represents an active queue */
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+
+       vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
+                             msix_vec);
+       if (IS_ERR(vq))
+               goto out_info;
+
+       info->vq = vq;
+       if (callback) {
+               spin_lock_irqsave(&vp_dev->lock, flags);
+               list_add(&info->node, &vp_dev->virtqueues);
+               spin_unlock_irqrestore(&vp_dev->lock, flags);
+       } else {
+               INIT_LIST_HEAD(&info->node);
+       }
+
+       vp_dev->vqs[index] = info;
+       return vq;
+
+out_info:
+       kfree(info);
+       return vq;
+}
+
+static void vp_del_vq(struct virtqueue *vq)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+       struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+       unsigned long flags;
+
+       spin_lock_irqsave(&vp_dev->lock, flags);
+       list_del(&info->node);
+       spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+       vp_dev->del_vq(info);
+       kfree(info);
 }
 
 /* the config->del_vqs() implementation */
 void vp_del_vqs(struct virtio_device *vdev)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtqueue *vq, *n;
        int i;
 
-       if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
-               return;
+       list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+               if (vp_dev->per_vq_vectors) {
+                       int v = vp_dev->vqs[vq->index]->msix_vector;
 
-       vp_remove_vqs(vdev);
+                       if (v != VIRTIO_MSI_NO_VECTOR) {
+                               int irq = pci_irq_vector(vp_dev->pci_dev, v);
+
+                               irq_set_affinity_hint(irq, NULL);
+                               free_irq(irq, vq);
+                       }
+               }
+               vp_del_vq(vq);
+       }
+       vp_dev->per_vq_vectors = false;
+
+       if (vp_dev->intx_enabled) {
+               free_irq(vp_dev->pci_dev->irq, vp_dev);
+               vp_dev->intx_enabled = 0;
+       }
 
-       if (vp_dev->pci_dev->msix_enabled) {
-               for (i = 0; i < vp_dev->msix_vectors; i++)
+       for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+               free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+       for (i = 0; i < vp_dev->msix_vectors; i++)
+               if (vp_dev->msix_affinity_masks[i])
                        free_cpumask_var(vp_dev->msix_affinity_masks[i]);
 
+       if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
                vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
 
-               kfree(vp_dev->msix_affinity_masks);
-               kfree(vp_dev->msix_names);
-               kfree(vp_dev->msix_vector_map);
+               pci_free_irq_vectors(vp_dev->pci_dev);
+               vp_dev->msix_enabled = 0;
        }
 
-       free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-       pci_free_irq_vectors(vp_dev->pci_dev);
+       vp_dev->msix_vectors = 0;
+       vp_dev->msix_used_vectors = 0;
+       kfree(vp_dev->msix_names);
+       vp_dev->msix_names = NULL;
+       kfree(vp_dev->msix_affinity_masks);
+       vp_dev->msix_affinity_masks = NULL;
+       kfree(vp_dev->vqs);
+       vp_dev->vqs = NULL;
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
-               const char * const names[], struct irq_affinity *desc)
+               const char * const names[], bool per_vq_vectors,
+               struct irq_affinity *desc)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       const char *name = dev_name(&vp_dev->vdev.dev);
-       int i, j, err = -ENOMEM, allocated_vectors, nvectors;
-       unsigned flags = PCI_IRQ_MSIX;
-       bool shared = false;
        u16 msix_vec;
+       int i, err, nvectors, allocated_vectors;
 
-       if (desc) {
-               flags |= PCI_IRQ_AFFINITY;
-               desc->pre_vectors++; /* virtio config vector */
-       }
-
-       nvectors = 1;
-       for (i = 0; i < nvqs; i++)
-               if (callbacks[i])
-                       nvectors++;
-
-       /* Try one vector per queue first. */
-       err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-                       nvectors, flags, desc);
-       if (err < 0) {
-               /* Fallback to one vector for config, one shared for queues. */
-               shared = true;
-               err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
-                               PCI_IRQ_MSIX);
-               if (err < 0)
-                       return err;
-       }
-       if (err < 0)
-               return err;
-
-       vp_dev->msix_vectors = nvectors;
-       vp_dev->msix_names = kmalloc_array(nvectors,
-                       sizeof(*vp_dev->msix_names), GFP_KERNEL);
-       if (!vp_dev->msix_names)
-               goto out_free_irq_vectors;
-
-       vp_dev->msix_affinity_masks = kcalloc(nvectors,
-                       sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
-       if (!vp_dev->msix_affinity_masks)
-               goto out_free_msix_names;
+       vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+       if (!vp_dev->vqs)
+               return -ENOMEM;
 
-       for (i = 0; i < nvectors; ++i) {
-               if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
-                               GFP_KERNEL))
-                       goto out_free_msix_affinity_masks;
+       if (per_vq_vectors) {
+               /* Best option: one for change interrupt, one per vq. */
+               nvectors = 1;
+               for (i = 0; i < nvqs; ++i)
+                       if (callbacks[i])
+                               ++nvectors;
+       } else {
+               /* Second best: one for change, shared for all vqs. */
+               nvectors = 2;
        }
 
-       /* Set the vector used for configuration */
-       snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
-                "%s-config", name);
-       err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
-                       0, vp_dev->msix_names[0], vp_dev);
+       err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
+                                     per_vq_vectors ? desc : NULL);
        if (err)
-               goto out_free_msix_affinity_masks;
+               goto error_find;
 
-       /* Verify we had enough resources to assign the vector */
-       if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
-               err = -EBUSY;
-               goto out_free_config_irq;
-       }
-
-       vp_dev->msix_vector_map = kmalloc_array(nvqs,
-                       sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
-       if (!vp_dev->msix_vector_map)
-               goto out_disable_config_irq;
-
-       allocated_vectors = j = 1; /* vector 0 is the config interrupt */
+       vp_dev->per_vq_vectors = per_vq_vectors;
+       allocated_vectors = vp_dev->msix_used_vectors;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
 
-               if (callbacks[i])
-                       msix_vec = allocated_vectors;
-               else
+               if (!callbacks[i])
                        msix_vec = VIRTIO_MSI_NO_VECTOR;
-
-               vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-                               msix_vec);
+               else if (vp_dev->per_vq_vectors)
+                       msix_vec = allocated_vectors++;
+               else
+                       msix_vec = VP_MSIX_VQ_VECTOR;
+               vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+                                    msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
-                       goto out_remove_vqs;
+                       goto error_find;
                }
 
-               if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
-                       vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
+               if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
                        continue;
-               }
 
-               snprintf(vp_dev->msix_names[j],
-                        sizeof(*vp_dev->msix_names), "%s-%s",
+               /* allocate per-vq irq if available and necessary */
+               snprintf(vp_dev->msix_names[msix_vec],
+                        sizeof *vp_dev->msix_names,
+                        "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-                                 vring_interrupt, IRQF_SHARED,
-                                 vp_dev->msix_names[j], vqs[i]);
-               if (err) {
-                       /* don't free this irq on error */
-                       vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
-                       goto out_remove_vqs;
-               }
-               vp_dev->msix_vector_map[i] = msix_vec;
-               j++;
-
-               /*
-                * Use a different vector for each queue if they are available,
-                * else share the same vector for all VQs.
-                */
-               if (!shared)
-                       allocated_vectors++;
+                                 vring_interrupt, 0,
+                                 vp_dev->msix_names[msix_vec],
+                                 vqs[i]);
+               if (err)
+                       goto error_find;
        }
-
        return 0;
 
-out_remove_vqs:
-       vp_remove_vqs(vdev);
-       kfree(vp_dev->msix_vector_map);
-out_disable_config_irq:
-       vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-out_free_config_irq:
-       free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-out_free_msix_affinity_masks:
-       for (i = 0; i < nvectors; i++) {
-               if (vp_dev->msix_affinity_masks[i])
-                       free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-       }
-       kfree(vp_dev->msix_affinity_masks);
-out_free_msix_names:
-       kfree(vp_dev->msix_names);
-out_free_irq_vectors:
-       pci_free_irq_vectors(vp_dev->pci_dev);
+error_find:
+       vp_del_vqs(vdev);
        return err;
 }
 
@@ -287,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i, err;
 
+       vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+       if (!vp_dev->vqs)
+               return -ENOMEM;
+
        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
                        dev_name(&vdev->dev), vp_dev);
        if (err)
-               return err;
+               goto out_del_vqs;
 
+       vp_dev->intx_enabled = 1;
+       vp_dev->per_vq_vectors = false;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
-               vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-                               VIRTIO_MSI_NO_VECTOR);
+               vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+                                    VIRTIO_MSI_NO_VECTOR);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
-                       goto out_remove_vqs;
+                       goto out_del_vqs;
                }
        }
 
        return 0;
-
-out_remove_vqs:
-       vp_remove_vqs(vdev);
-       free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+out_del_vqs:
+       vp_del_vqs(vdev);
        return err;
 }
 
@@ -320,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 {
        int err;
 
-       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc);
+       /* Try MSI-X with one vector per queue. */
+       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
        if (!err)
                return 0;
+       /* Fallback: MSI-X with one vector for config, one shared for queues. */
+       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
+       if (!err)
+               return 0;
+       /* Finally fall back to regular interrupts. */
        return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
 }
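
[Editor's note, not part of the patch: vp_find_vqs() now expresses the
interrupt setup as an explicit three-step fallback. A toy sketch of the
ladder, with stand-in strategy functions that report failure via non-zero:]

    #include <stdio.h>

    static int try_msix(int per_vq) { return per_vq ? -1 : 0; /* pretend */ }
    static int try_intx(void)       { return 0; }

    int main(void)
    {
        if (!try_msix(1))
            printf("per-vq MSI-X\n");
        else if (!try_msix(0))
            printf("shared MSI-X\n");   /* taken in this toy run */
        else if (!try_intx())
            printf("INTx\n");
        else
            printf("no interrupts available\n");
        return 0;
    }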
 
@@ -342,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 {
        struct virtio_device *vdev = vq->vdev;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+       struct cpumask *mask;
+       unsigned int irq;
 
        if (!vq->callback)
                return -EINVAL;
 
-       if (vp_dev->pci_dev->msix_enabled) {
-               int vec = vp_dev->msix_vector_map[vq->index];
-               struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
-               unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
-
+       if (vp_dev->msix_enabled) {
+               mask = vp_dev->msix_affinity_masks[info->msix_vector];
+               irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
                if (cpu == -1)
                        irq_set_affinity_hint(irq, NULL);
                else {
@@ -365,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       unsigned int *map = vp_dev->msix_vector_map;
 
-       if (!map || map[index] == VIRTIO_MSI_NO_VECTOR)
+       if (!vp_dev->per_vq_vectors ||
+           vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
                return NULL;
 
-       return pci_irq_get_affinity(vp_dev->pci_dev, map[index]);
+       return pci_irq_get_affinity(vp_dev->pci_dev,
+                                   vp_dev->vqs[index]->msix_vector);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -441,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
        vp_dev->vdev.dev.parent = &pci_dev->dev;
        vp_dev->vdev.dev.release = virtio_pci_release_dev;
        vp_dev->pci_dev = pci_dev;
+       INIT_LIST_HEAD(&vp_dev->virtqueues);
+       spin_lock_init(&vp_dev->lock);
 
        /* enable the device */
        rc = pci_enable_device(pci_dev);
index ac8c9d7889646ab3cc28bb51accd0d3840d5a34f..e96334aec1e0d70842d1a9fc53462ab728be87c0 100644 (file)
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 
+struct virtio_pci_vq_info {
+       /* the actual virtqueue */
+       struct virtqueue *vq;
+
+       /* the list node for the virtqueues list */
+       struct list_head node;
+
+       /* MSI-X vector (or none) */
+       unsigned msix_vector;
+};
+
 /* Our device structure */
 struct virtio_pci_device {
        struct virtio_device vdev;
@@ -64,25 +75,47 @@ struct virtio_pci_device {
        /* the IO mapping for the PCI config space */
        void __iomem *ioaddr;
 
+       /* a list of queues so we can dispatch IRQs */
+       spinlock_t lock;
+       struct list_head virtqueues;
+
+       /* array of all queues for house-keeping */
+       struct virtio_pci_vq_info **vqs;
+
+       /* MSI-X support */
+       int msix_enabled;
+       int intx_enabled;
        cpumask_var_t *msix_affinity_masks;
        /* Name strings for interrupts. This size should be enough,
         * and I'm too lazy to allocate each name separately. */
        char (*msix_names)[256];
-       /* Total Number of MSI-X vectors (including per-VQ ones). */
-       int msix_vectors;
-       /* Map of per-VQ MSI-X vectors, may be NULL */
-       unsigned *msix_vector_map;
+       /* Number of available vectors */
+       unsigned msix_vectors;
+       /* Vectors allocated, excluding per-vq vectors if any */
+       unsigned msix_used_vectors;
+
+       /* Whether we have a vector per vq */
+       bool per_vq_vectors;
 
        struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
+                                     struct virtio_pci_vq_info *info,
                                      unsigned idx,
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name,
                                      u16 msix_vec);
-       void (*del_vq)(struct virtqueue *vq);
+       void (*del_vq)(struct virtio_pci_vq_info *info);
 
        u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
 };
 
+/* Constants for MSI-X */
+/* Use the first vector for configuration changes, the second and the rest
+ * for virtqueues. Thus, we need at least 2 vectors for MSI-X. */
+enum {
+       VP_MSIX_CONFIG_VECTOR = 0,
+       VP_MSIX_VQ_VECTOR = 1,
+};
+
 /* Convert a generic virtio device to our structure */
 static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
 {
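
The restored virtqueues list exists because, with INTx or a single shared MSI-X vector, the interrupt alone does not identify which queue fired: the shared handler has to walk every registered queue and poke its callback. A rough user-space sketch of that dispatch loop (singly linked for brevity, and without the spinlock the kernel takes around the walk):

    #include <stdio.h>

    struct vq_info {
        int index;
        void (*callback)(struct vq_info *vq);
        struct vq_info *next;    /* stand-in for the kernel's list_head */
    };

    /* Walk all registered queues, as the driver's shared interrupt
     * handler does under vp_dev->lock. */
    static void shared_irq_dispatch(struct vq_info *head)
    {
        for (struct vq_info *vq = head; vq; vq = vq->next)
            if (vq->callback)
                vq->callback(vq);
    }

    static void on_work(struct vq_info *vq)
    {
        printf("serviced vq %d\n", vq->index);
    }

    int main(void)
    {
        struct vq_info b = { 1, on_work, NULL };
        struct vq_info a = { 0, on_work, &b };

        shared_irq_dispatch(&a);
        return 0;
    }

The vqs array, by contrast, is indexed bookkeeping: it lets per-queue paths such as vp_set_vq_affinity() reach a queue's MSI-X vector directly via vq->index.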
index f7362c5fe18a96a902bc81138b8ea796e03e79d9..4bfa48fb1324660f82ae6272d2e1ecc33522ba3e 100644 (file)
@@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+                                 struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
@@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
                return ERR_PTR(-ENOENT);
 
+       info->msix_vector = msix_vec;
+
        /* create the vring */
        vq = vring_create_virtqueue(index, num,
                                    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -159,13 +162,14 @@ out_deactivate:
        return ERR_PTR(err);
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+       struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
        iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
-       if (vp_dev->pci_dev->msix_enabled) {
+       if (vp_dev->msix_enabled) {
                iowrite16(VIRTIO_MSI_NO_VECTOR,
                          vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                /* Flush the write out to device */
index 7bc3004b840ef3e3dabb5c2e24af8a935f552eba..8978f109d2d79828e5b0c12649debc481dfacd7f 100644 (file)
@@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+                                 struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
@@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        /* get offset of notification word for this vq */
        off = vp_ioread16(&cfg->queue_notify_off);
 
+       info->msix_vector = msix_vec;
+
        /* create the vring */
        vq = vring_create_virtqueue(index, num,
                                    SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
        return 0;
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+       struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
        vp_iowrite16(vq->index, &vp_dev->common->queue_select);
 
-       if (vp_dev->pci_dev->msix_enabled) {
+       if (vp_dev->msix_enabled) {
                vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
                             &vp_dev->common->queue_msix_vector);
                /* Flush the write out to device */
index a18510be76c141e5d4b4d687c2eb5498cc273c56..5e71f1ea3391b034dc8e6f55f62d82dbe76e9811 100644 (file)
@@ -7910,7 +7910,6 @@ struct btrfs_retry_complete {
 static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
-       struct inode *inode;
        struct bio_vec *bvec;
        int i;
 
@@ -7918,12 +7917,12 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
                goto end;
 
        ASSERT(bio->bi_vcnt == 1);
-       inode = bio->bi_io_vec->bv_page->mapping->host;
-       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
        done->uptodate = 1;
        bio_for_each_segment_all(bvec, bio, i)
-       clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0);
+               clean_io_failure(BTRFS_I(done->inode), done->start,
+                                bvec->bv_page, 0);
 end:
        complete(&done->done);
        bio_put(bio);
@@ -7973,8 +7972,10 @@ next_block_or_try_again:
 
                start += sectorsize;
 
-               if (nr_sectors--) {
+               nr_sectors--;
+               if (nr_sectors) {
                        pgoff += sectorsize;
+                       ASSERT(pgoff < PAGE_SIZE);
                        goto next_block_or_try_again;
                }
        }
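
The nr_sectors change above is a real off-by-one fix, not a style tweak: `if (nr_sectors--)` tests the value before decrementing, so with exactly one sector left the retry loop ran one extra time and pushed pgoff past the page (hence the new ASSERT). A short demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
        /* Post-decrement tests the old value: with one sector left,
         * the body runs again even though the work is done. */
        int n = 1;
        if (n--)
            printf("buggy form loops again (n is now %d)\n", n);

        /* Decrement first, then test, as the fix does: the loop
         * stops as soon as the last sector has been handled. */
        n = 1;
        n--;
        if (n)
            printf("never reached\n");
        else
            printf("fixed form terminates correctly\n");
        return 0;
    }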
@@ -7986,9 +7987,7 @@ static void btrfs_retry_endio(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-       struct inode *inode;
        struct bio_vec *bvec;
-       u64 start;
        int uptodate;
        int ret;
        int i;
@@ -7998,11 +7997,8 @@ static void btrfs_retry_endio(struct bio *bio)
 
        uptodate = 1;
 
-       start = done->start;
-
        ASSERT(bio->bi_vcnt == 1);
-       inode = bio->bi_io_vec->bv_page->mapping->host;
-       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
        bio_for_each_segment_all(bvec, bio, i) {
                ret = __readpage_endio_check(done->inode, io_bio, i,
@@ -8080,8 +8076,10 @@ next:
 
                ASSERT(nr_sectors);
 
-               if (--nr_sectors) {
+               nr_sectors--;
+               if (nr_sectors) {
                        pgoff += sectorsize;
+                       ASSERT(pgoff < PAGE_SIZE);
                        goto next_block;
                }
        }
index a59801dc2a340bcd3c5a34af9ef7277061967377..afbea61d957e893db09effb75ec47c7d670e24e3 100644 (file)
@@ -1042,9 +1042,12 @@ static void report_reserved_underflow(struct btrfs_fs_info *fs_info,
                                      struct btrfs_qgroup *qgroup,
                                      u64 num_bytes)
 {
-       btrfs_warn(fs_info,
+#ifdef CONFIG_BTRFS_DEBUG
+       WARN_ON(qgroup->reserved < num_bytes);
+       btrfs_debug(fs_info,
                "qgroup %llu reserved space underflow, have: %llu, to free: %llu",
                qgroup->qgroupid, qgroup->reserved, num_bytes);
+#endif
        qgroup->reserved = 0;
 }
 /*
@@ -1075,7 +1078,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
        qgroup->excl += sign * num_bytes;
        qgroup->excl_cmpr += sign * num_bytes;
        if (sign > 0) {
-               if (WARN_ON(qgroup->reserved < num_bytes))
+               if (qgroup->reserved < num_bytes)
                        report_reserved_underflow(fs_info, qgroup, num_bytes);
                else
                        qgroup->reserved -= num_bytes;
@@ -1100,7 +1103,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                WARN_ON(sign < 0 && qgroup->excl < num_bytes);
                qgroup->excl += sign * num_bytes;
                if (sign > 0) {
-                       if (WARN_ON(qgroup->reserved < num_bytes))
+                       if (qgroup->reserved < num_bytes)
                                report_reserved_underflow(fs_info, qgroup,
                                                          num_bytes);
                        else
@@ -2469,7 +2472,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
 
                qg = unode_aux_to_qgroup(unode);
 
-               if (WARN_ON(qg->reserved < num_bytes))
+               if (qg->reserved < num_bytes)
                        report_reserved_underflow(fs_info, qg, num_bytes);
                else
                        qg->reserved -= num_bytes;
index da687dc79cce6155a278038a15775637c97ce3cc..9530a333d302c0c13c3e8463814b642a023afc23 100644 (file)
@@ -549,16 +549,19 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_ssd:
                        btrfs_set_and_info(info, SSD,
                                           "use ssd allocation scheme");
+                       btrfs_clear_opt(info->mount_opt, NOSSD);
                        break;
                case Opt_ssd_spread:
                        btrfs_set_and_info(info, SSD_SPREAD,
                                           "use spread ssd allocation scheme");
                        btrfs_set_opt(info->mount_opt, SSD);
+                       btrfs_clear_opt(info->mount_opt, NOSSD);
                        break;
                case Opt_nossd:
                        btrfs_set_and_info(info, NOSSD,
                                             "not using ssd allocation scheme");
                        btrfs_clear_opt(info->mount_opt, SSD);
+                       btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
                        break;
                case Opt_barrier:
                        btrfs_clear_and_info(info, NOBARRIER,
index 73d56eef5e60f311225b06ad7adccedeed54a0db..ab8a66d852f91cb04206361a551b8c57760c9c40 100644 (file)
@@ -6213,7 +6213,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
        for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
                dev = bbio->stripes[dev_nr].dev;
                if (!dev || !dev->bdev ||
-                   (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
+                   (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
                        bbio_error(bbio, first_bio, logical);
                        continue;
                }
index d449e1c03cbd791922148ad00c3d6d0f0e1599ce..d3119fe3ab45fdbdb534651ef68194815dcc544b 100644 (file)
@@ -2071,11 +2071,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
        if (inode_dirty_flags)
                __mark_inode_dirty(inode, inode_dirty_flags);
 
-       if (ia_valid & ATTR_MODE) {
-               err = posix_acl_chmod(inode, attr->ia_mode);
-               if (err)
-                       goto out_put;
-       }
 
        if (mask) {
                req->r_inode = inode;
@@ -2088,14 +2083,12 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
        dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
             ceph_cap_string(dirtied), mask);
 
-       ceph_mdsc_put_request(req);
-       if (mask & CEPH_SETATTR_SIZE)
-               __ceph_do_pending_vmtruncate(inode);
-       ceph_free_cap_flush(prealloc_cf);
-       return err;
-out_put:
        ceph_mdsc_put_request(req);
        ceph_free_cap_flush(prealloc_cf);
+
+       if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
+               __ceph_do_pending_vmtruncate(inode);
+
        return err;
 }
 
@@ -2114,7 +2107,12 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        if (err != 0)
                return err;
 
-       return __ceph_setattr(inode, attr);
+       err = __ceph_setattr(inode, attr);
+
+       if (err >= 0 && (attr->ia_valid & ATTR_MODE))
+               err = posix_acl_chmod(inode, attr->ia_mode);
+
+       return err;
 }
 
 /*
index d07f13a63369df6a177a2236b9498cfefee3cb97..37f5a41cc50cc523cd76c790100398d4db9e80ca 100644 (file)
@@ -948,7 +948,6 @@ struct cifs_tcon {
        bool use_persistent:1; /* use persistent instead of durable handles */
 #ifdef CONFIG_CIFS_SMB2
        bool print:1;           /* set if connection to printer share */
-       bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
        __le32 capabilities;
        __u32 share_flags;
        __u32 maximal_access;
index ec5e5e514fdd4a2556fa15974e869cdba5e3bd91..97e5d236d26559806ca8bc278f7c248e0579ef77 100644 (file)
@@ -79,8 +79,7 @@ extern void cifs_delete_mid(struct mid_q_entry *mid);
 extern void cifs_wake_up_task(struct mid_q_entry *mid);
 extern int cifs_handle_standard(struct TCP_Server_Info *server,
                                struct mid_q_entry *mid);
-extern int cifs_discard_remaining_data(struct TCP_Server_Info *server,
-                                      char *buf);
+extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
 extern int cifs_call_async(struct TCP_Server_Info *server,
                        struct smb_rqst *rqst,
                        mid_receive_t *receive, mid_callback_t *callback,
index 967b92631807a95786a264acf80833f0e0dbb147..5d21f00ae341b4ec002eb33dd79c7a16a1e1dfa4 100644 (file)
@@ -1400,9 +1400,9 @@ openRetry:
  * current bigbuf.
  */
 int
-cifs_discard_remaining_data(struct TCP_Server_Info *server, char *buf)
+cifs_discard_remaining_data(struct TCP_Server_Info *server)
 {
-       unsigned int rfclen = get_rfc1002_length(buf);
+       unsigned int rfclen = get_rfc1002_length(server->smallbuf);
        int remaining = rfclen + 4 - server->total_read;
 
        while (remaining > 0) {
@@ -1426,8 +1426,10 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        int length;
        struct cifs_readdata *rdata = mid->callback_data;
 
-       length = cifs_discard_remaining_data(server, mid->resp_buf);
+       length = cifs_discard_remaining_data(server);
        dequeue_mid(mid, rdata->result);
+       mid->resp_buf = server->smallbuf;
+       server->smallbuf = NULL;
        return length;
 }
 
@@ -1459,7 +1461,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
        if (server->ops->is_status_pending &&
            server->ops->is_status_pending(buf, server, 0)) {
-               cifs_discard_remaining_data(server, buf);
+               cifs_discard_remaining_data(server);
                return -1;
        }
 
@@ -1519,9 +1521,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
                 rdata->iov[0].iov_base, server->total_read);
 
-       mid->resp_buf = server->smallbuf;
-       server->smallbuf = NULL;
-
        /* how much data is in the response? */
        data_len = server->ops->read_data_length(buf);
        if (data_offset + data_len > buflen) {
@@ -1544,6 +1543,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                return cifs_readv_discard(server, mid);
 
        dequeue_mid(mid, false);
+       mid->resp_buf = server->smallbuf;
+       server->smallbuf = NULL;
        return length;
 }
 
index 0c7596cef4b88639c50b3efdb86a511f349ac451..d82467cfb0e2df5ef7e655bc601f4f17f3b890b1 100644 (file)
@@ -3753,6 +3753,9 @@ try_mount_again:
        if (IS_ERR(tcon)) {
                rc = PTR_ERR(tcon);
                tcon = NULL;
+               if (rc == -EACCES)
+                       goto mount_fail_check;
+
                goto remote_path_check;
        }
 
index aa3debbba82648944a1e1426516abebb268b7ca0..21d4045357397de647b8fe0282ed1a436b7e3a7f 100644 (file)
@@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
                wdata->credits = credits;
 
                if (!wdata->cfile->invalidHandle ||
-                   !cifs_reopen_file(wdata->cfile, false))
+                   !(rc = cifs_reopen_file(wdata->cfile, false)))
                        rc = server->ops->async_writev(wdata,
                                        cifs_uncached_writedata_release);
                if (rc) {
@@ -3022,7 +3022,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
                rdata->credits = credits;
 
                if (!rdata->cfile->invalidHandle ||
-                   !cifs_reopen_file(rdata->cfile, true))
+                   !(rc = cifs_reopen_file(rdata->cfile, true)))
                        rc = server->ops->async_readv(rdata);
 error:
                if (rc) {
@@ -3617,7 +3617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                }
 
                if (!rdata->cfile->invalidHandle ||
-                   !cifs_reopen_file(rdata->cfile, true))
+                   !(rc = cifs_reopen_file(rdata->cfile, true)))
                        rc = server->ops->async_readv(rdata);
                if (rc) {
                        add_credits_and_wake_if(server, rdata->credits, 0);
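
All three hunks in this file fix the same omission: when cifs_reopen_file() failed, rc kept its previous value, so the caller behaved as though the request had been submitted and could wait for a completion that would never come. Folding the assignment into the condition both short-circuits the async call and records the error. The shape of the pattern, in a standalone sketch with hypothetical names:

    #include <stdio.h>

    static int handle_invalid = 1;

    static int reopen_file(void)  { return -5; /* pretend -EIO */ }
    static int submit_async(void) { return 0; }

    static int send_io(void)
    {
        int rc = 0;

        /* Submit only if the handle is valid or was reopened
         * successfully; a reopen failure is now captured in rc. */
        if (!handle_invalid || !(rc = reopen_file()))
            rc = submit_async();
        return rc;
    }

    int main(void)
    {
        printf("send_io with a stale handle -> %d\n", send_io());
        return 0;
    }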
index cc93ba4da9b592468f36d37b80777e88b8e400e6..27bc360c7ffd7e1081f907c5f080dc4ba439fbfc 100644 (file)
@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
        return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
 }
 
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+       if (server->tcpStatus == CifsGood)
+               return true;
+
+       return false;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = {
        .get_dfs_refer = CIFSGetDFSRefer,
        .qfs_tcon = cifs_qfs_tcon,
        .is_path_accessible = cifs_is_path_accessible,
+       .can_echo = cifs_can_echo,
        .query_path_info = cifs_query_path_info,
        .query_file_info = cifs_query_file_info,
        .get_srv_inum = cifs_get_srv_inum,
index 7b12a727947efc48df3ce8ea7698936d0702da21..152e37f2ad9213462a2ae439296a6edb49d653f9 100644 (file)
@@ -2195,7 +2195,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
        if (rc)
                goto free_pages;
 
-       rc = cifs_discard_remaining_data(server, buf);
+       rc = cifs_discard_remaining_data(server);
        if (rc)
                goto free_pages;
 
@@ -2221,7 +2221,7 @@ free_pages:
        kfree(pages);
        return rc;
 discard_data:
-       cifs_discard_remaining_data(server, buf);
+       cifs_discard_remaining_data(server);
        goto free_pages;
 }
 
index 66fa1b941cdf02f9d837b077ed1f1de3a0a94a79..02da648041fcd58d6e37431a60d596a5bfd4ca06 100644 (file)
@@ -562,8 +562,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
         * but for time being this is our only auth choice so doesn't matter.
         * We just found a server which sets blob length to zero expecting raw.
         */
-       if (blob_length == 0)
+       if (blob_length == 0) {
                cifs_dbg(FYI, "missing security blob on negprot\n");
+               server->sec_ntlmssp = true;
+       }
 
        rc = cifs_enable_signing(server, ses->sign);
        if (rc)
@@ -1171,9 +1173,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        else
                return -EIO;
 
-       if (tcon && tcon->bad_network_name)
-               return -ENOENT;
-
        unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
        if (unc_path == NULL)
                return -ENOMEM;
@@ -1277,8 +1276,6 @@ tcon_exit:
 tcon_error_exit:
        if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
                cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
-               if (tcon)
-                       tcon->bad_network_name = true;
        }
        goto tcon_exit;
 }
@@ -2181,6 +2178,9 @@ void smb2_reconnect_server(struct work_struct *work)
        struct cifs_tcon *tcon, *tcon2;
        struct list_head tmp_list;
        int tcon_exist = false;
+       int rc;
+       int resched = false;
+
 
        /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
        mutex_lock(&server->reconnect_mutex);
@@ -2208,13 +2208,18 @@ void smb2_reconnect_server(struct work_struct *work)
        spin_unlock(&cifs_tcp_ses_lock);
 
        list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
-               if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
+               rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+               if (!rc)
                        cifs_reopen_persistent_handles(tcon);
+               else
+                       resched = true;
                list_del_init(&tcon->rlist);
                cifs_put_tcon(tcon);
        }
 
        cifs_dbg(FYI, "Reconnecting tcons finished\n");
+       if (resched)
+               queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
        mutex_unlock(&server->reconnect_mutex);
 
        /* now we can safely release srv struct */
index 7163fe014b57f4e15813c1969958d5764b18e5af..dde861387a407810b35f73ff37c3ce6389048326 100644 (file)
@@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;
 
+       /*
+        * Offset passed to mmap (before page shift) could have been
+        * negative when represented as a (l)off_t.
+        */
+       if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+               return -EINVAL;
+
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;
 
        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+       len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+       /* check for overflow */
+       if (len < vma_len)
+               return -EINVAL;
 
        inode_lock(inode);
        file_accessed(file);
 
        ret = -ENOMEM;
-       len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
@@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
        ret = 0;
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
-               inode->i_size = len;
+               i_size_write(inode, len);
 out:
        inode_unlock(inode);
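
Both new checks guard the same arithmetic: a page offset shifted left can come out negative as a loff_t, and adding the mapping length can wrap back around. A user-space illustration of the two failure modes, with constants picked to trigger them on a 64-bit build:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        typedef int64_t loff_t_;    /* stand-in for the kernel loff_t */
        const int PAGE_SHIFT_ = 12;

        /* A large enough pgoff goes negative once shifted. */
        uint64_t pgoff = 1ULL << 51;
        loff_t_ off = (loff_t_)(pgoff << PAGE_SHIFT_);
        if (off < 0)
            printf("rejected: shifted offset is negative (%lld)\n",
                   (long long)off);

        /* Even two positive values can wrap when added; the fix
         * detects this as len < vma_len. */
        loff_t_ vma_len = 1LL << 62;
        loff_t_ pos = 3LL << 61;
        loff_t_ len = (loff_t_)((uint64_t)vma_len + (uint64_t)pos);
        if (len < vma_len)
            printf("rejected: len wrapped past vma_len\n");
        return 0;
    }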
 
index d41fab78798b2e2510ca4f8b54925ef304a14c7d..19dcf62133cc95162d364f7ef43c17d280ee6448 100644 (file)
@@ -2145,6 +2145,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
        int retval = 0;
        const char *s = nd->name->name;
 
+       if (!*s)
+               flags &= ~LOOKUP_RCU;
+
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
        nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
        nd->depth = 0;
index aab32fc3d6a84965ea879854c942b12888548411..c1b5fed7c863b2b730e46f0139a7b6f9a5f68fa0 100644 (file)
@@ -537,7 +537,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
-       return 0;
+       return requested_bytes;
 }
 
 /**
@@ -566,7 +566,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
-       ssize_t result = -EINVAL;
+       ssize_t result = -EINVAL, requested;
        size_t count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 
@@ -600,14 +600,19 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
        nfs_start_io_direct(inode);
 
        NFS_I(inode)->read_io += count;
-       result = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
+       requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
 
        nfs_end_io_direct(inode);
 
-       if (!result) {
+       if (requested > 0) {
                result = nfs_direct_wait(dreq);
-               if (result > 0)
+               if (result > 0) {
+                       requested -= result;
                        iocb->ki_pos += result;
+               }
+               iov_iter_revert(iter, requested);
+       } else {
+               result = requested;
        }
 
 out_release:
@@ -954,7 +959,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
-       return 0;
+       return requested_bytes;
 }
 
 /**
@@ -979,7 +984,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
  */
 ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 {
-       ssize_t result = -EINVAL;
+       ssize_t result = -EINVAL, requested;
        size_t count;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -1022,7 +1027,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 
        nfs_start_io_direct(inode);
 
-       result = nfs_direct_write_schedule_iovec(dreq, iter, pos);
+       requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);
 
        if (mapping->nrpages) {
                invalidate_inode_pages2_range(mapping,
@@ -1031,13 +1036,17 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 
        nfs_end_io_direct(inode);
 
-       if (!result) {
+       if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
+                       requested -= result;
                        iocb->ki_pos = pos + result;
                        /* XXX: should check the generic_write_sync retval */
                        generic_write_sync(iocb, result);
                }
+               iov_iter_revert(iter, requested);
+       } else {
+               result = requested;
        }
 out_release:
        nfs_direct_req_release(dreq);
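
The scheduling functions now return the number of bytes actually handed to the lower layers instead of 0, which lets the callers above rewind the iterator by the unconsumed remainder (requested minus result) before reporting a short transfer. Reduced to a bare cursor, the bookkeeping looks like this (a sketch; in the kernel the rewinding is done by the new iov_iter_revert()):

    #include <stdio.h>

    struct cursor { long pos; };

    static void advance(struct cursor *c, long n) { c->pos += n; }
    static void revert(struct cursor *c, long n)  { c->pos -= n; }

    int main(void)
    {
        struct cursor it = { 0 };
        long requested = 4096;    /* bytes scheduled for I/O */
        long result = 1024;       /* bytes that actually completed */

        advance(&it, requested);  /* scheduling consumed the iterator */
        requested -= result;      /* the completed part stays consumed */
        revert(&it, requested);   /* hand the rest back to the caller */

        printf("iterator position after a short read: %ld\n", it.pos);
        return 0;
    }

Without the revert, a short direct read or write would leave the iterator advanced past data the caller still owns.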
index dba2ff8eaa68e3365deac3d61df3da5641099d9e..452334694a5d1f37cc480e5d1cf2873c4246019d 100644 (file)
@@ -358,6 +358,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 {
        unsigned int len, v, hdr, dlen;
        u32 max_blocksize = svc_max_payload(rqstp);
+       struct kvec *head = rqstp->rq_arg.head;
+       struct kvec *tail = rqstp->rq_arg.tail;
 
        p = decode_fh(p, &args->fh);
        if (!p)
@@ -367,6 +369,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
        args->count = ntohl(*p++);
        args->stable = ntohl(*p++);
        len = args->len = ntohl(*p++);
+       if ((void *)p > head->iov_base + head->iov_len)
+               return 0;
        /*
         * The count must equal the amount of data passed.
         */
@@ -377,9 +381,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
         * Check to make sure that we got the right number of
         * bytes.
         */
-       hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
-       dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-               + rqstp->rq_arg.tail[0].iov_len - hdr;
+       hdr = (void*)p - head->iov_base;
+       dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
        /*
         * Round the length of the data which was specified up to
         * the next multiple of XDR units and then compare that
@@ -396,7 +399,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
                len = args->len = max_blocksize;
        }
        rqstp->rq_vec[0].iov_base = (void*)p;
-       rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+       rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
        v = 0;
        while (len > rqstp->rq_vec[v].iov_len) {
                len -= rqstp->rq_vec[v].iov_len;
@@ -471,6 +474,8 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
        /* first copy and check from the first page */
        old = (char*)p;
        vec = &rqstp->rq_arg.head[0];
+       if ((void *)old > vec->iov_base + vec->iov_len)
+               return 0;
        avail = vec->iov_len - (old - (char*)vec->iov_base);
        while (len && avail && *old) {
                *new++ = *old++;
index cbeeda1e94a2fbbba61e2adeeb4f9ba89287eaf9..d86031b6ad79301c8ca0ceec9c8b991dd5db70f1 100644 (file)
@@ -2489,7 +2489,7 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
 
 int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
-       if (op->opnum == OP_ILLEGAL)
+       if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp)
                return op_encode_hdr_size * sizeof(__be32);
 
        BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
index 31e1f959345715a59f8f6b9d9ec00a0ec00fece5..59979f0bbd4bf255f5ea6f8fbd33cb9b2a5aa073 100644 (file)
@@ -747,6 +747,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
        return nfserr;
 }
 
+/*
+ * A write procedure can have a large argument, and a read procedure can
+ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
+ * reply that can both be larger than a page.  The xdr code has taken
+ * advantage of this assumption to be a sloppy about bounds checking in
+ * some cases.  Pending a rewrite of the NFSv2/v3 xdr code to fix that
+ * problem, we enforce these assumptions here:
+ */
+static bool nfs_request_too_big(struct svc_rqst *rqstp,
+                               struct svc_procedure *proc)
+{
+       /*
+        * The ACL code has more careful bounds-checking and is not
+        * susceptible to this problem:
+        */
+       if (rqstp->rq_prog != NFS_PROGRAM)
+               return false;
+       /*
+        * Ditto NFSv4 (which can in theory have argument and reply both
+        * more than a page):
+        */
+       if (rqstp->rq_vers >= 4)
+               return false;
+       /* The reply will be small, we're OK: */
+       if (proc->pc_xdrressize > 0 &&
+           proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
+               return false;
+
+       return rqstp->rq_arg.len > PAGE_SIZE;
+}
+
 int
 nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
 {
@@ -759,6 +790,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
                                rqstp->rq_vers, rqstp->rq_proc);
        proc = rqstp->rq_procinfo;
 
+       if (nfs_request_too_big(rqstp, proc)) {
+               dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
+               *statp = rpc_garbage_args;
+               return 1;
+       }
        /*
         * Give the xdr decoder a chance to change this if it wants
         * (necessary in the NFSv4.0 compound case)
index 41b468a6a90f807fe3f3d2e4ceeaa9f8c7ae0f8c..de07ff625777820fefc98bfa56adea81962e8135 100644 (file)
@@ -280,6 +280,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
                                        struct nfsd_writeargs *args)
 {
        unsigned int len, hdr, dlen;
+       struct kvec *head = rqstp->rq_arg.head;
        int v;
 
        p = decode_fh(p, &args->fh);
@@ -300,9 +301,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
         * Check to make sure that we got the right number of
         * bytes.
         */
-       hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
-       dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-               - hdr;
+       hdr = (void*)p - head->iov_base;
+       if (hdr > head->iov_len)
+               return 0;
+       dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
 
        /*
         * Round the length of the data which was specified up to
@@ -316,7 +318,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
                return 0;
 
        rqstp->rq_vec[0].iov_base = (void*)p;
-       rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+       rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
        v = 0;
        while (len > rqstp->rq_vec[v].iov_len) {
                len -= rqstp->rq_vec[v].iov_len;
index 1656843e87d2bef47b69d65b23c23946e8936f33..323f492e0822dd3286365d5cdbe56c59ec2d5463 100644 (file)
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -91,6 +91,7 @@ slow:
                return ERR_PTR(-ENOMEM);
        }
        d_instantiate(dentry, inode);
+       dentry->d_flags |= DCACHE_RCUACCESS;
        dentry->d_fsdata = (void *)ns->ops;
        d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
        if (d) {
index c4ab6fdf17a01426db5d6e2bb9638130147bee61..e1534c9bab16ce69e30eefd6614ca2872595fb04 100644 (file)
@@ -208,14 +208,19 @@ restart:
                                continue;
                        /*
                         * Skip ops whose filesystem we don't know about unless
-                        * it is being mounted.
+                        * it is being mounted or unmounted.  It is possible for
+                        * a filesystem we don't know about to be unmounted if
+                        * it fails to mount in the kernel after userspace has
+                        * been sent the mount request.
                         */
                        /* XXX: is there a better way to detect this? */
                        } else if (ret == -1 &&
                                   !(op->upcall.type ==
                                        ORANGEFS_VFS_OP_FS_MOUNT ||
                                     op->upcall.type ==
-                                       ORANGEFS_VFS_OP_GETATTR)) {
+                                       ORANGEFS_VFS_OP_GETATTR ||
+                                    op->upcall.type ==
+                                       ORANGEFS_VFS_OP_FS_UMOUNT)) {
                                gossip_debug(GOSSIP_DEV_DEBUG,
                                    "orangefs: skipping op tag %llu %s\n",
                                    llu(op->tag), get_opname_string(op));
index 6333cbbdfef7ae652c1a4e6c4d2818ae1cd188d7..83b506020718980a69e5285a8c86496c14a2d050 100644 (file)
@@ -521,13 +521,11 @@ int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
                size_t n = size;
                if (n > PAGE_SIZE)
                        n = PAGE_SIZE;
-               n = copy_page_from_iter(page, 0, n, iter);
-               if (!n)
+               if (copy_page_from_iter(page, 0, n, iter) != n)
                        return -EFAULT;
                size -= n;
        }
        return 0;
-
 }
 
 /*
index 5e48a0be976194f466b654fc1aa11dc670cdd084..8afac46fcc87a1e1d3ea8c658e0ae60c336c3d5d 100644 (file)
@@ -249,6 +249,7 @@ struct orangefs_sb_info_s {
        char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
        struct super_block *sb;
        int mount_pending;
+       int no_list;
        struct list_head list;
 };
 
index cd261c8de53a1747fc2ca185d7b4c6f207f5521e..629d8c917fa679886715360fcfe8f6cee1b5a37f 100644 (file)
@@ -493,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
        if (ret) {
                d = ERR_PTR(ret);
-               goto free_op;
+               goto free_sb_and_op;
        }
 
        /*
@@ -519,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
        spin_unlock(&orangefs_superblocks_lock);
        op_release(new_op);
 
+       /* Must be removed from the list now. */
+       ORANGEFS_SB(sb)->no_list = 0;
+
        if (orangefs_userspace_version >= 20906) {
                new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
                if (!new_op)
@@ -533,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
        return dget(sb->s_root);
 
+free_sb_and_op:
+       /* Will call orangefs_kill_sb with sb not in list. */
+       ORANGEFS_SB(sb)->no_list = 1;
+       deactivate_locked_super(sb);
 free_op:
        gossip_err("orangefs_mount: mount request failed with %d\n", ret);
        if (ret == -EINVAL) {
@@ -558,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb)
         */
         orangefs_unmount_sb(sb);
 
-       /* remove the sb from our list of orangefs specific sb's */
-
-       spin_lock(&orangefs_superblocks_lock);
-       __list_del_entry(&ORANGEFS_SB(sb)->list);       /* not list_del_init */
-       ORANGEFS_SB(sb)->list.prev = NULL;
-       spin_unlock(&orangefs_superblocks_lock);
+       if (!ORANGEFS_SB(sb)->no_list) {
+               /* remove the sb from our list of orangefs specific sb's */
+               spin_lock(&orangefs_superblocks_lock);
+               /* not list_del_init */
+               __list_del_entry(&ORANGEFS_SB(sb)->list);
+               ORANGEFS_SB(sb)->list.prev = NULL;
+               spin_unlock(&orangefs_superblocks_lock);
+       }
 
        /*
         * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
index f08bd31c1081cc0536602db9d865f4bf491440c9..312578089544dbd652aac303d0cbabee8fbcb968 100644 (file)
@@ -900,7 +900,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
 {
-       pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+       pmd_t pmd = *pmdp;
+
+       /* See comment in change_huge_pmd() */
+       pmdp_invalidate(vma, addr, pmdp);
+       if (pmd_dirty(*pmdp))
+               pmd = pmd_mkdirty(pmd);
+       if (pmd_young(*pmdp))
+               pmd = pmd_mkyoung(pmd);
 
        pmd = pmd_wrprotect(pmd);
        pmd = pmd_clear_soft_dirty(pmd);
index c6c963b2546b4f2d0301f40706064498c31fd3b6..a257b872a53d1105a797c75b14f13c31a442a12b 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -547,13 +547,13 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer)
 /**
  * sys_statx - System call to get enhanced stats
  * @dfd: Base directory to pathwalk from *or* fd to stat.
- * @filename: File to stat *or* NULL.
+ * @filename: File to stat or "" with AT_EMPTY_PATH
  * @flags: AT_* flags to control pathwalk.
  * @mask: Parts of statx struct actually required.
  * @buffer: Result buffer.
  *
- * Note that if filename is NULL, then it does the equivalent of fstat() using
- * dfd to indicate the file of interest.
+ * Note that fstat() can be emulated by setting dfd to the fd of interest,
+ * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
  */
 SYSCALL_DEFINE5(statx,
                int, dfd, const char __user *, filename, unsigned, flags,
@@ -568,10 +568,7 @@ SYSCALL_DEFINE5(statx,
        if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
                return -EINVAL;
 
-       if (filename)
-               error = vfs_statx(dfd, filename, flags, &stat, mask);
-       else
-               error = vfs_statx_fd(dfd, &stat, mask, flags);
+       error = vfs_statx(dfd, filename, flags, &stat, mask);
        if (error)
                return error;
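
With the NULL special case gone, the fstat()-style behavior is requested explicitly: pass the fd as dfd, an empty string as the path, and AT_EMPTY_PATH in flags. Assuming a glibc recent enough (2.28+) to expose the statx() wrapper, the equivalent of fstat() on the current directory looks like:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct statx stx;

        /* "" plus AT_EMPTY_PATH makes dfd itself the target, which is
         * the documented replacement for passing a NULL filename. */
        if (statx(AT_FDCWD, "", AT_EMPTY_PATH, STATX_SIZE, &stx) == -1) {
            perror("statx");
            return 1;
        }
        printf("size: %llu\n", (unsigned long long)stx.stx_size);
        return 0;
    }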
 
index 1e712a36468064a75910e8ba9c0e1cfb2bd3ab28..718b749fa11aa8901544a1e6925a7486bbb357f5 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/math64.h>
 #include <linux/uaccess.h>
 #include <linux/random.h>
+#include <linux/ctype.h>
 #include "ubifs.h"
 
 static DEFINE_SPINLOCK(dbg_lock);
@@ -286,8 +287,10 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
                        break;
                }
 
-               pr_err("\t%d: %s (%s)\n",
-                      count++, dent->name, get_dent_type(dent->type));
+               pr_err("\t%d: inode %llu, type %s, len %d\n",
+                      count++, (unsigned long long) le64_to_cpu(dent->inum),
+                      get_dent_type(dent->type),
+                      le16_to_cpu(dent->nlen));
 
                fname_name(&nm) = dent->name;
                fname_len(&nm) = le16_to_cpu(dent->nlen);
@@ -464,7 +467,8 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
                        pr_err("(bad name length, not printing, bad or corrupted node)");
                else {
                        for (i = 0; i < nlen && dent->name[i]; i++)
-                               pr_cont("%c", dent->name[i]);
+                               pr_cont("%c", isprint(dent->name[i]) ?
+                                       dent->name[i] : '?');
                }
                pr_cont("\n");
 
index 30825d882aa94a4c2486d47581f33d1dfca1a409..b777bddaa1dda9f2768952dc562e06df6b068d32 100644 (file)
@@ -606,8 +606,8 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
        }
 
        while (1) {
-               dbg_gen("feed '%s', ino %llu, new f_pos %#x",
-                       dent->name, (unsigned long long)le64_to_cpu(dent->inum),
+               dbg_gen("ino %llu, new f_pos %#x",
+                       (unsigned long long)le64_to_cpu(dent->inum),
                        key_hash_flash(c, &dent->key));
                ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
                             ubifs_inode(dir)->creat_sqnum);
@@ -748,6 +748,11 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
                goto out_fname;
 
        lock_2_inodes(dir, inode);
+
+       /* Handle the O_TMPFILE corner case: it is allowed to link an O_TMPFILE. */
+       if (inode->i_nlink == 0)
+               ubifs_delete_orphan(c, inode->i_ino);
+
        inc_nlink(inode);
        ihold(inode);
        inode->i_ctime = ubifs_current_time(inode);
@@ -768,6 +773,8 @@ out_cancel:
        dir->i_size -= sz_change;
        dir_ui->ui_size = dir->i_size;
        drop_nlink(inode);
+       if (inode->i_nlink == 0)
+               ubifs_add_orphan(c, inode->i_ino);
        unlock_2_inodes(dir, inode);
        ubifs_release_budget(c, &req);
        iput(inode);
@@ -1068,8 +1075,10 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
        }
 
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
-       if (err)
+       if (err) {
+               kfree(dev);
                goto out_budg;
+       }
 
        sz_change = CALC_DENT_SIZE(fname_len(&nm));
 
@@ -1316,9 +1325,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        unsigned int uninitialized_var(saved_nlink);
        struct fscrypt_name old_nm, new_nm;
 
-       if (flags & ~RENAME_NOREPLACE)
-               return -EINVAL;
-
        /*
         * Budget request settings: deletion direntry, new direntry, removing
         * the old inode, and changing old and new parent directory inodes.
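
The orphan juggling in ubifs_link() above exists because an O_TMPFILE inode starts life with i_nlink == 0 and sits on the orphan list; linking it in gives it a name, so the orphan record must be dropped first (and re-added if the link is cancelled). The user-visible pattern the filesystem has to support, assuming a kernel and filesystem with O_TMPFILE support:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char path[64];

        /* Anonymous file: no directory entry, i_nlink == 0. */
        int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);
        if (fd == -1) {
            perror("open(O_TMPFILE)");
            return 1;
        }

        /* Give it a name afterwards; this is the link-an-O_TMPFILE
         * case the hunk above handles. */
        snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
        if (linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/made-permanent",
                   AT_SYMLINK_FOLLOW) == -1)
            perror("linkat");

        close(fd);
        return 0;
    }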
index 1d4f365d8f03a439ab0e20caf27ae34ad6e13e74..f6d9af3efa45a6cc8eb448a71f5d43d72d8fcbd1 100644 (file)
@@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
        return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
 }
 
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+       req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+       return req->base.flags;
+}
+
 static inline struct crypto_ahash *crypto_spawn_ahash(
        struct crypto_ahash_spawn *spawn)
 {
index 7548f332121ab733e908474e5e626bb9aecb59e3..01a696b0a4d3ae0118a2742c96d68775ef9f637a 100644 (file)
@@ -1672,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
        return true;
 }
 
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
-                        struct bio *next)
+static inline bool bio_will_gap(struct request_queue *q,
+                               struct request *prev_rq,
+                               struct bio *prev,
+                               struct bio *next)
 {
        if (bio_has_data(prev) && queue_virt_boundary(q)) {
                struct bio_vec pb, nb;
 
+               /*
+                * don't merge if the 1st bio starts with non-zero
+                * offset, otherwise it is quite difficult to respect
+                * sg gap limit. We work hard to merge a huge number of small
+                * single bios in case of mkfs.
+                */
+               if (prev_rq)
+                       bio_get_first_bvec(prev_rq->bio, &pb);
+               else
+                       bio_get_first_bvec(prev, &pb);
+               if (pb.bv_offset)
+                       return true;
+
+               /*
+                * We don't need to worry about the situation that the
+                * merged segment ends in unaligned virt boundary:
+                *
+                * - if 'pb' ends aligned, the merged segment ends aligned
+                * - if 'pb' ends unaligned, the next bio must include
+                *   one single bvec of 'nb', otherwise the 'nb' can't
+                *   merge with 'pb'
+                */
                bio_get_last_bvec(prev, &pb);
                bio_get_first_bvec(next, &nb);
 
@@ -1690,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
 {
-       return bio_will_gap(req->q, req->biotail, bio);
+       return bio_will_gap(req->q, req, req->biotail, bio);
 }
 
 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
 {
-       return bio_will_gap(req->q, bio, req->bio);
+       return bio_will_gap(req->q, NULL, bio, req->bio);
 }
 
 int kblockd_schedule_work(struct work_struct *work);
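
The rule bio_will_gap() enforces: a merged segment must not straddle the device's virtual boundary, so data can only be appended if the previous piece ends on the boundary and the new piece starts at offset zero. A simplified sketch of that test (mask and names illustrative; the kernel's bvec_gap_to_prev() is the authoritative check):

    #include <stdbool.h>
    #include <stdio.h>

    /* boundary_mask = boundary_size - 1, e.g. 0xfff for 4 KiB. */
    static bool will_gap(unsigned long boundary_mask,
                         unsigned long prev_end_offset,
                         unsigned long next_start_offset)
    {
        /* Gap unless the previous segment ends exactly on the
         * boundary and the next one starts at offset zero. */
        return next_start_offset ||
               (prev_end_offset & boundary_mask);
    }

    int main(void)
    {
        unsigned long mask = 0xfff;

        printf("clean join: gap=%d\n", will_gap(mask, 0x1000, 0));
        printf("short tail: gap=%d\n", will_gap(mask, 0x0e00, 0));
        return 0;
    }

The new pb.bv_offset test above applies the same idea one step earlier: if the very first bvec of the request already starts unaligned, later bios cannot merge cleanly anyway, so the merge is refused up front.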
index f6b43fbb141c9ad03c1e05880e8c2228743d076e..af9c86e958bdad3be90cfb5f19aad99ede457339 100644 (file)
@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
        pr_cont_kernfs_path(cgrp->kn);
 }
 
+static inline void cgroup_init_kthreadd(void)
+{
+       /*
+        * kthreadd is inherited by all kthreads, keep it in the root so
+        * that the new kthreads are guaranteed to stay in the root until
+        * initialization is finished.
+        */
+       current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+       /*
+        * This kthread finished initialization.  The creator should have
+        * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+        */
+       current->no_cgroup_migration = 0;
+}
+
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
 
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
index aab032a6ae6124de63b7934f49d0dc564b0d2dc3..97ca105347a6c5e608297ae1a3562925dc6f6834 100644 (file)
@@ -53,7 +53,7 @@ struct sdio_func {
        unsigned int            state;          /* function state */
 #define SDIO_STATE_PRESENT     (1<<0)          /* present in sysfs */
 
-       u8                      tmpbuf[4];      /* DMA:able scratch buffer */
+       u8                      *tmpbuf;        /* DMA:able scratch buffer */
 
        unsigned                num_info;       /* number of info strings */
        const char              **info;         /* info strings */
index 51891fb0d3ce075e9343495e54de6d8d03211ac9..c91b3bcd158f8fa8b2dbc3d4738ade5fdcc600cb 100644 (file)
@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
        ___pud;                                                         \
 })
 
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)           \
-({                                                                     \
-       unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
-       pmd_t ___pmd;                                                   \
-                                                                       \
-       ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd);         \
-       mmu_notifier_invalidate_range(__mm, ___haddr,                   \
-                                     ___haddr + HPAGE_PMD_SIZE);       \
-                                                                       \
-       ___pmd;                                                         \
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 #define        ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
index 43a774873aa96d4af64d0cdebb579be572a6658a..fb38573371512338a7876652d95c7f811be1bd3f 100644 (file)
@@ -852,6 +852,7 @@ void phy_change_work(struct work_struct *work);
 void phy_mac_interrupt(struct phy_device *phydev, int new_link);
 void phy_start_machine(struct phy_device *phydev);
 void phy_stop_machine(struct phy_device *phydev);
+void phy_trigger_machine(struct phy_device *phydev, bool sync);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_ksettings_get(struct phy_device *phydev,
index 96fb139bdd08fdec3ad83e689ad2808375473126..13d8681210d545ab2dcedb472fa127b408446eae 100644 (file)
@@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc);
 struct reset_control *__of_reset_control_get(struct device_node *node,
                                     const char *id, int index, bool shared,
                                     bool optional);
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+                                         int index, bool shared,
+                                         bool optional);
 void reset_control_put(struct reset_control *rstc);
 struct reset_control *__devm_reset_control_get(struct device *dev,
                                     const char *id, int index, bool shared,
@@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get(
        return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
+static inline struct reset_control *__reset_control_get(
+                                       struct device *dev, const char *id,
+                                       int index, bool shared, bool optional)
+{
+       return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
 static inline struct reset_control *__devm_reset_control_get(
                                        struct device *dev, const char *id,
                                        int index, bool shared, bool optional)
@@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 #ifndef CONFIG_RESET_CONTROLLER
        WARN_ON(1);
 #endif
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-                                                                       false);
+       return __reset_control_get(dev, id, 0, false, false);
 }
 
 /**
@@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 static inline struct reset_control *reset_control_get_shared(
                                        struct device *dev, const char *id)
 {
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-                                                                       false);
+       return __reset_control_get(dev, id, 0, true, false);
 }
 
 static inline struct reset_control *reset_control_get_optional_exclusive(
                                        struct device *dev, const char *id)
 {
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-                                                                       true);
+       return __reset_control_get(dev, id, 0, false, true);
 }
 
 static inline struct reset_control *reset_control_get_optional_shared(
                                        struct device *dev, const char *id)
 {
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-                                                                       true);
+       return __reset_control_get(dev, id, 0, true, true);
 }
 
 /**
index d67eee84fd430f3c44b77d4ba007ec5d2dcabb2b..4cf9a59a4d08ed181f30d7cefa56db92f48408d2 100644 (file)
@@ -604,6 +604,10 @@ struct task_struct {
 #ifdef CONFIG_COMPAT_BRK
        unsigned                        brk_randomized:1;
 #endif
+#ifdef CONFIG_CGROUPS
+       /* disallow userland-initiated cgroup migration */
+       unsigned                        no_cgroup_migration:1;
+#endif
 
        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 
index 804e34c6f981de7402ba6a39bd0375b3a3dd0efe..f2d36a3d30052db827fab68e9278fc1b693068ca 100644 (file)
@@ -39,7 +39,10 @@ struct iov_iter {
        };
        union {
                unsigned long nr_segs;
-               int idx;
+               struct {
+                       int idx;
+                       int start_idx;
+               };
        };
 };
 
@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
index 04b0d3f95043c66856c6a6f4cab078c4791d35e8..7edfbdb55a995d436bf9e999ce202d0ca0bf2550 100644 (file)
@@ -167,6 +167,7 @@ struct virtio_driver {
        unsigned int feature_table_size;
        const unsigned int *feature_table_legacy;
        unsigned int feature_table_size_legacy;
+       int (*validate)(struct virtio_device *dev);
        int (*probe)(struct virtio_device *dev);
        void (*scan)(struct virtio_device *dev);
        void (*remove)(struct virtio_device *dev);
index 4b784b6e21c0d9cb533b31997883d7dd447343bf..ccfad0e9c2cdbd68f13c809c7ed6414b2c0c97c1 100644 (file)
@@ -117,6 +117,7 @@ enum transport_state_table {
        TRANSPORT_ISTATE_PROCESSING = 11,
        TRANSPORT_COMPLETE_QF_WP = 18,
        TRANSPORT_COMPLETE_QF_OK = 19,
+       TRANSPORT_COMPLETE_QF_ERR = 20,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp {
        u16     tg_pt_gp_id;
        int     tg_pt_gp_valid_id;
        int     tg_pt_gp_alua_supported_states;
-       int     tg_pt_gp_alua_pending_state;
-       int     tg_pt_gp_alua_previous_state;
        int     tg_pt_gp_alua_access_status;
        int     tg_pt_gp_alua_access_type;
        int     tg_pt_gp_nonop_delay_msecs;
@@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp {
        int     tg_pt_gp_pref;
        int     tg_pt_gp_write_metadata;
        u32     tg_pt_gp_members;
-       atomic_t tg_pt_gp_alua_access_state;
+       int     tg_pt_gp_alua_access_state;
        atomic_t tg_pt_gp_ref_cnt;
        spinlock_t tg_pt_gp_lock;
-       struct mutex tg_pt_gp_md_mutex;
+       struct mutex tg_pt_gp_transition_mutex;
        struct se_device *tg_pt_gp_dev;
        struct config_group tg_pt_gp_group;
        struct list_head tg_pt_gp_list;
        struct list_head tg_pt_gp_lun_list;
        struct se_lun *tg_pt_gp_alua_lun;
        struct se_node_acl *tg_pt_gp_alua_nacl;
-       struct work_struct tg_pt_gp_transition_work;
-       struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_vpd {
@@ -705,6 +702,7 @@ struct se_lun {
        u64                     unpacked_lun;
 #define SE_LUN_LINK_MAGIC                      0xffff7771
        u32                     lun_link_magic;
+       bool                    lun_shutdown;
        bool                    lun_access_ro;
        u32                     lun_index;
 
index dd9820b1c7796b87986443124ad18907b8b719c3..f8d9fed17ba99418d858ceb57a864b31a00d078a 100644 (file)
@@ -445,6 +445,7 @@ header-y += unistd.h
 header-y += unix_diag.h
 header-y += usbdevice_fs.h
 header-y += usbip.h
+header-y += userio.h
 header-y += utime.h
 header-y += utsname.h
 header-y += uuid.h
index 85bbb1799df3f93cb1eacbfec497278c2c20c098..d496c02e14bc44327fd3ab6c3faad0c1d7ac1e12 100644 (file)
@@ -35,7 +35,7 @@
 #define RTF_PREF(pref) ((pref) << 27)
 #define RTF_PREF_MASK  0x18000000
 
-#define RTF_PCPU       0x40000000
+#define RTF_PCPU       0x40000000      /* read-only: cannot be set by user */
 #define RTF_LOCAL      0x80000000
 
 
index d538897b8e08bb4358c56148c189fc7b8128a9da..17b10304c393355da9ed2da743107a5c59748290 100644 (file)
  * tv_sec holds the number of seconds before (negative) or after (positive)
  * 00:00:00 1st January 1970 UTC.
  *
- * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is
- * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time.
- *
- * Note that if both tv_sec and tv_nsec are non-zero, then the two values must
- * either be both positive or both negative.
+ * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time.
  *
  * __reserved is held in case we need a yet finer resolution.
  */
 struct statx_timestamp {
        __s64   tv_sec;
-       __s32   tv_nsec;
+       __u32   tv_nsec;
        __s32   __reserved;
 };
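
With tv_nsec now unsigned, it always counts forward from tv_sec, so a pre-epoch time pairs a negative tv_sec with a non-negative tv_nsec. A worked example with illustrative values:

	/* 0.5s before the epoch, i.e. 1969-12-31 23:59:59.5 UTC */
	struct statx_timestamp ts = {
		.tv_sec  = -1,		/* one second before the epoch */
		.tv_nsec = 500000000,	/* plus 0.5s forward from tv_sec */
	};
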
 
index 15b4385a2be169e9b99221f31a444820feadae3d..90007a1abcab144ac3d6ac7d6e6f4001d58abb14 100644 (file)
@@ -79,7 +79,7 @@
  * configuration space */
 #define VIRTIO_PCI_CONFIG_OFF(msix_enabled)    ((msix_enabled) ? 24 : 20)
 /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
-#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled)
+#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled)
 
 /* Virtio ABI version, this must match exactly */
 #define VIRTIO_PCI_ABI_VERSION         0
index 2f4964cfde0b4f142778c199e606dc3d519ebbf0..a871bf80fde1adc79040936475b7045971e72d76 100644 (file)
@@ -160,7 +160,6 @@ static LIST_HEAD(audit_freelist);
 
 /* queue msgs to send via kauditd_task */
 static struct sk_buff_head audit_queue;
-static void kauditd_hold_skb(struct sk_buff *skb);
 /* queue msgs due to temporary unicast send problems */
 static struct sk_buff_head audit_retry_queue;
 /* queue msgs waiting for new auditd connection */
@@ -453,30 +452,6 @@ static void auditd_set(int pid, u32 portid, struct net *net)
        spin_unlock_irqrestore(&auditd_conn.lock, flags);
 }
 
-/**
- * auditd_reset - Disconnect the auditd connection
- *
- * Description:
- * Break the auditd/kauditd connection and move all the queued records into the
- * hold queue in case auditd reconnects.
- */
-static void auditd_reset(void)
-{
-       struct sk_buff *skb;
-
-       /* if it isn't already broken, break the connection */
-       rcu_read_lock();
-       if (auditd_conn.pid)
-               auditd_set(0, 0, NULL);
-       rcu_read_unlock();
-
-       /* flush all of the main and retry queues to the hold queue */
-       while ((skb = skb_dequeue(&audit_retry_queue)))
-               kauditd_hold_skb(skb);
-       while ((skb = skb_dequeue(&audit_queue)))
-               kauditd_hold_skb(skb);
-}
-
 /**
  * kauditd_print_skb - Print the audit record to the ring buffer
  * @skb: audit record
@@ -505,9 +480,6 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
 {
        /* put the record back in the queue at the same place */
        skb_queue_head(&audit_hold_queue, skb);
-
-       /* fail the auditd connection */
-       auditd_reset();
 }
 
 /**
@@ -544,9 +516,6 @@ static void kauditd_hold_skb(struct sk_buff *skb)
        /* we have no other options - drop the message */
        audit_log_lost("kauditd hold queue overflow");
        kfree_skb(skb);
-
-       /* fail the auditd connection */
-       auditd_reset();
 }
 
 /**
@@ -566,6 +535,30 @@ static void kauditd_retry_skb(struct sk_buff *skb)
        skb_queue_tail(&audit_retry_queue, skb);
 }
 
+/**
+ * auditd_reset - Disconnect the auditd connection
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the queued records into the
+ * hold queue in case auditd reconnects.
+ */
+static void auditd_reset(void)
+{
+       struct sk_buff *skb;
+
+       /* if it isn't already broken, break the connection */
+       rcu_read_lock();
+       if (auditd_conn.pid)
+               auditd_set(0, 0, NULL);
+       rcu_read_unlock();
+
+       /* flush all of the main and retry queues to the hold queue */
+       while ((skb = skb_dequeue(&audit_retry_queue)))
+               kauditd_hold_skb(skb);
+       while ((skb = skb_dequeue(&audit_queue)))
+               kauditd_hold_skb(skb);
+}
+
 /**
  * auditd_send_unicast_skb - Send a record via unicast to auditd
  * @skb: audit record
@@ -758,6 +751,7 @@ static int kauditd_thread(void *dummy)
                                        NULL, kauditd_rehold_skb);
                if (rc < 0) {
                        sk = NULL;
+                       auditd_reset();
                        goto main_queue;
                }
 
@@ -767,6 +761,7 @@ static int kauditd_thread(void *dummy)
                                        NULL, kauditd_hold_skb);
                if (rc < 0) {
                        sk = NULL;
+                       auditd_reset();
                        goto main_queue;
                }
 
@@ -775,16 +770,18 @@ main_queue:
                 * unicast, dump failed record sends to the retry queue; if
                 * sk == NULL due to previous failures we will just do the
                 * multicast send and move the record to the retry queue */
-               kauditd_send_queue(sk, portid, &audit_queue, 1,
-                                  kauditd_send_multicast_skb,
-                                  kauditd_retry_skb);
+               rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
+                                       kauditd_send_multicast_skb,
+                                       kauditd_retry_skb);
+               if (sk == NULL || rc < 0)
+                       auditd_reset();
+               sk = NULL;
 
                /* drop our netns reference, no auditd sends past this line */
                if (net) {
                        put_net(net);
                        net = NULL;
                }
-               sk = NULL;
 
                /* we have processed all the queues so wake everyone */
                wake_up(&audit_backlog_wait);
index f45827e205d3f491a818a024dca5122c68924697..b4f1cb0c5ac7104c3f12f9ed5c1e3fe159c57824 100644 (file)
@@ -1162,12 +1162,12 @@ out:
        LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
                off = IMM;
 load_word:
-               /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
-                * only appearing in the programs where ctx ==
-                * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-                * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
-                * internal BPF verifier will check that BPF_R6 ==
-                * ctx.
+                /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
+                * appearing in the programs where ctx == skb
+                * (see may_access_skb() in the verifier). All programs
+                * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
+                * bpf_convert_filter() saves it in BPF_R6, internal BPF
+                * verifier will check that BPF_R6 == ctx.
                 *
                 * BPF_ABS and BPF_IND are wrappers of function calls,
                 * so they scratch BPF_R1-BPF_R5 registers, preserve
index 7af0dcc5d7555679cea6c08395ab54710e7066e6..821f9e807de5705d5b4d65e502fb13a06d3215bb 100644 (file)
@@ -617,6 +617,14 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
                        if (insn->imm == BPF_FUNC_xdp_adjust_head)
                                prog->xdp_adjust_head = 1;
                        if (insn->imm == BPF_FUNC_tail_call) {
+                               /* If we tail call into other programs, we
+                                * cannot make any assumptions since they
+                                * can be replaced dynamically during runtime
+                                * in the program array.
+                                */
+                               prog->cb_access = 1;
+                               prog->xdp_adjust_head = 1;
+
                                /* mark bpf_tail_call as different opcode
                                 * to avoid conditional branch in
                 * interpreter for every normal call
index 48851327a15e18e8ba151a3a45c5126c5023ddb8..687f5e0194efccbadce199c0570cd08b8c96181a 100644 (file)
@@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
                tsk = tsk->group_leader;
 
        /*
-        * Workqueue threads may acquire PF_NO_SETAFFINITY and become
-        * trapped in a cpuset, or RT worker may be born in a cgroup
-        * with no rt_runtime allocated.  Just say no.
+        * kthreads may acquire PF_NO_SETAFFINITY during initialization.
+        * If userland migrates such a kthread to a non-root cgroup, it can
+        * become trapped in a cpuset, or an RT kthread may be born in a
+        * cgroup with no rt_runtime allocated.  Just say no.
         */
-       if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
+       if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
                ret = -EINVAL;
                goto out_unlock_rcu;
        }
index 4544b115f5eb85d4b01ec966f933f191cd2cae1e..e2d356dd75812df8e42dfac3feb132edd5612e33 100644 (file)
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 struct cpumask *
 irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-       int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+       int n, nodes, cpus_per_vec, extra_vecs, curvec;
        int affv = nvecs - affd->pre_vectors - affd->post_vectors;
        int last_affv = affv + affd->pre_vectors;
        nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                goto done;
        }
 
-       /* Spread the vectors per node */
-       vecs_per_node = affv / nodes;
-       /* Account for rounding errors */
-       extra_vecs = affv - (nodes * vecs_per_node);
-
        for_each_node_mask(n, nodemsk) {
-               int ncpus, v, vecs_to_assign = vecs_per_node;
+               int ncpus, v, vecs_to_assign, vecs_per_node;
+
+               /* Spread the vectors per node */
+               vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
                /* Get the cpus on this node which are in the mask */
                cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
                /* Calculate the number of cpus per vector */
                ncpus = cpumask_weight(nmsk);
+               vecs_to_assign = min(vecs_per_node, ncpus);
+
+               /* Account for rounding errors */
+               extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
 
                for (v = 0; curvec < last_affv && v < vecs_to_assign;
                     curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                        /* Account for extra vectors to compensate for rounding errors */
                        if (extra_vecs) {
                                cpus_per_vec++;
-                               if (!--extra_vecs)
-                                       vecs_per_node++;
+                               --extra_vecs;
                        }
                        irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
                }
 
                if (curvec >= last_affv)
                        break;
+               --nodes;
        }
 
 done:
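
The divisor now shrinks with the remaining node count, so rounding slack is redistributed to later nodes instead of being lost. A worked example with 8 vectors over 3 nodes (assuming ample CPUs per node): node 1 gets (8 - 0) / 3 = 2, node 2 gets (8 - 2) / 2 = 3, node 3 gets (8 - 5) / 1 = 3, and all 8 vectors are assigned. A standalone sketch of just that arithmetic (hypothetical helper, not part of the patch):

	static int spread_example(int affv, int nodes)
	{
		int assigned = 0;

		while (nodes) {
			/* shrinking divisor: split what is left over what remains */
			int vecs_per_node = (affv - assigned) / nodes;

			assigned += vecs_per_node;	/* 2, 3, 3 for affv=8, nodes=3 */
			--nodes;
		}
		return assigned;			/* == affv, nothing lost */
	}
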
index 2f26adea0f84d21f4dae6d1ffdbbf94a73f40c67..26db528c1d881bf371ea5b53b7ade0815c990bf1 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
+#include <linux/cgroup.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -225,6 +226,7 @@ static int kthread(void *_create)
 
        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+               cgroup_kthread_ready();
                __kthread_parkme(self);
                ret = threadfn(data);
        }
@@ -538,6 +540,7 @@ int kthreadd(void *unused)
        set_mems_allowed(node_states[N_MEMORY]);
 
        current->flags |= PF_NOFREEZE;
+       cgroup_init_kthreadd();
 
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
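
cgroup_init_kthreadd() and cgroup_kthread_ready() pair with the new no_cgroup_migration bit added to task_struct above: kthreadd marks itself unmigratable, every kthread it forks inherits the bit, and each kthread clears it once its initialization is done. A sketch of what these helpers plausibly look like (assumed definitions; the real ones live in the cgroup headers touched by this merge):

	static inline void cgroup_init_kthreadd(void)
	{
		/* kthreadd's children inherit this flag, so every new kthread
		 * starts with userland-initiated cgroup migration disallowed */
		current->no_cgroup_migration = 1;
	}

	static inline void cgroup_kthread_ready(void)
	{
		/* initialization done; allow cgroup migration again */
		current->no_cgroup_migration = 0;
	}
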
index c2b88490d857583026a35090b62f7891446b7ba2..c08fbd2f5ba9fa2a806f326a3a85f5d021d74027 100644 (file)
@@ -46,13 +46,13 @@ enum {
                (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
 
 /*
- * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
  * .data and .bss to fit in the required 32MB limit for the kernel. With
- * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
  * So, reduce the static allocations for lockdep-related structures so that
  * everything fits in current required size limit.
  */
-#ifdef CONFIG_PROVE_LOCKING_SMALL
+#ifdef CONFIG_LOCKDEP_SMALL
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
index f3778e2b46c8dc00c90d9165f6ae9efbf1f16dc7..aea3135c5d90f434ee72980c30f0db1129ef752b 100644 (file)
@@ -34,6 +34,18 @@ void disable_sched_clock_irqtime(void)
        sched_clock_irqtime = 0;
 }
 
+static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
+                                 enum cpu_usage_stat idx)
+{
+       u64 *cpustat = kcpustat_this_cpu->cpustat;
+
+       u64_stats_update_begin(&irqtime->sync);
+       cpustat[idx] += delta;
+       irqtime->total += delta;
+       irqtime->tick_delta += delta;
+       u64_stats_update_end(&irqtime->sync);
+}
+
 /*
  * Called before incrementing preempt_count on {soft,}irq_enter
  * and before decrementing preempt_count on {soft,}irq_exit.
@@ -41,7 +53,6 @@ void disable_sched_clock_irqtime(void)
 void irqtime_account_irq(struct task_struct *curr)
 {
        struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
-       u64 *cpustat = kcpustat_this_cpu->cpustat;
        s64 delta;
        int cpu;
 
@@ -52,22 +63,16 @@ void irqtime_account_irq(struct task_struct *curr)
        delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
        irqtime->irq_start_time += delta;
 
-       u64_stats_update_begin(&irqtime->sync);
        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to the ksoftirqd thread
         * in that case, so as not to confuse the scheduler with a special task
         * that does not consume any time, but still wants to run.
         */
-       if (hardirq_count()) {
-               cpustat[CPUTIME_IRQ] += delta;
-               irqtime->tick_delta += delta;
-       } else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
-               cpustat[CPUTIME_SOFTIRQ] += delta;
-               irqtime->tick_delta += delta;
-       }
-
-       u64_stats_update_end(&irqtime->sync);
+       if (hardirq_count())
+               irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
+       else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
+               irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
 
index 5cbf92214ad89287d111ab8300e5b55923d83ffe..767aab3505a81d14789382686f9c4ccba095cfaa 100644 (file)
@@ -1869,6 +1869,7 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 struct irqtime {
+       u64                     total;
        u64                     tick_delta;
        u64                     irq_start_time;
        struct u64_stats_sync   sync;
@@ -1876,16 +1877,20 @@ struct irqtime {
 
 DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
 
+/*
+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
+ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
+ * subtracted from it and would never move forward.
+ */
 static inline u64 irq_time_read(int cpu)
 {
        struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-       u64 *cpustat = kcpustat_cpu(cpu).cpustat;
        unsigned int seq;
        u64 total;
 
        do {
                seq = __u64_stats_fetch_begin(&irqtime->sync);
-               total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ];
+               total = irqtime->total;
        } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
 
        return total;
index b9691ee8f6c182cfee1af7308555b9291f3730bd..dd3e91d68dc73053e7ba0ca5bed0681b374ea8fc 100644 (file)
@@ -3755,23 +3755,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
        ftrace_probe_registered = 1;
 }
 
-static void __disable_ftrace_function_probe(void)
+static bool __disable_ftrace_function_probe(void)
 {
        int i;
 
        if (!ftrace_probe_registered)
-               return;
+               return false;
 
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
                if (hhd->first)
-                       return;
+                       return false;
        }
 
        /* no more funcs left */
        ftrace_shutdown(&trace_probe_ops, 0);
 
        ftrace_probe_registered = 0;
+       return true;
 }
 
 
@@ -3901,6 +3902,7 @@ static void
 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                  void *data, int flags)
 {
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_func_entry *rec_entry;
        struct ftrace_func_probe *entry;
        struct ftrace_func_probe *p;
@@ -3912,6 +3914,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        struct hlist_node *tmp;
        char str[KSYM_SYMBOL_LEN];
        int i, ret;
+       bool disabled;
 
        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
                func_g.search = NULL;
@@ -3930,6 +3933,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
        mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
+       old_hash_ops.filter_hash = old_hash;
+       /* Probes only have filters */
+       old_hash_ops.notrace_hash = NULL;
+
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash)
                /* Hmm, should report this somehow */
@@ -3967,12 +3974,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                }
        }
        mutex_lock(&ftrace_lock);
-       __disable_ftrace_function_probe();
+       disabled = __disable_ftrace_function_probe();
        /*
         * Remove after the disable is called. Otherwise, if the last
         * probe is removed, a null hash means *all enabled*.
         */
        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+       /* still need to update the function call sites */
+       if (ftrace_enabled && !disabled)
+               ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+                                      &old_hash_ops);
        synchronize_sched();
        if (!ret)
                free_ftrace_hash_rcu(old_hash);
@@ -5554,6 +5566,15 @@ static void clear_ftrace_pids(struct trace_array *tr)
        trace_free_pid_list(pid_list);
 }
 
+void ftrace_clear_pids(struct trace_array *tr)
+{
+       mutex_lock(&ftrace_lock);
+
+       clear_ftrace_pids(tr);
+
+       mutex_unlock(&ftrace_lock);
+}
+
 static void ftrace_pid_reset(struct trace_array *tr)
 {
        mutex_lock(&ftrace_lock);
index 54e7a90db848df3d1bca17e1ca966e57d2ce57d2..ca47a4fa2986c953dffa0b74fc791647bf0409fb 100644 (file)
@@ -3405,11 +3405,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
+       struct buffer_page *reader;
+       struct buffer_page *head_page;
+       struct buffer_page *commit_page;
+       unsigned commit;
 
        cpu_buffer = iter->cpu_buffer;
 
-       return iter->head_page == cpu_buffer->commit_page &&
-               iter->head == rb_commit_index(cpu_buffer);
+       /* Remember, trace recording is off when iterator is in use */
+       reader = cpu_buffer->reader_page;
+       head_page = cpu_buffer->head_page;
+       commit_page = cpu_buffer->commit_page;
+       commit = rb_page_commit(commit_page);
+
+       return ((iter->head_page == commit_page && iter->head == commit) ||
+               (iter->head_page == reader && commit_page == head_page &&
+                head_page->read == commit &&
+                iter->head == rb_page_commit(cpu_buffer->reader_page)));
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
index 0ed834d6beb0feeb68a35a88f048931923970bfe..b253d59b9c518a4c71ad03d2e4940e782dd4e8a4 100644 (file)
@@ -6734,11 +6734,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
                return ret;
 
  out_reg:
-       ret = register_ftrace_function_probe(glob, ops, count);
+       ret = alloc_snapshot(&global_trace);
+       if (ret < 0)
+               goto out;
 
-       if (ret >= 0)
-               alloc_snapshot(&global_trace);
+       ret = register_ftrace_function_probe(glob, ops, count);
 
+ out:
        return ret < 0 ? ret : 0;
 }
 
@@ -7403,6 +7405,7 @@ static int instance_rmdir(const char *name)
 
        tracing_set_nop(tr);
        event_trace_del_tracer(tr);
+       ftrace_clear_pids(tr);
        ftrace_destroy_function_files(tr);
        tracefs_remove_recursive(tr->dir);
        free_trace_buffers(tr);
index ae1cce91fead25a065899109e426a6cc1e597d28..d19d52d600d623e9d9f0676891e19c6e5e880bce 100644 (file)
@@ -896,6 +896,7 @@ int using_ftrace_ops_list_func(void);
 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
                                  struct dentry *d_tracer);
+void ftrace_clear_pids(struct trace_array *tr);
 #else
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
@@ -914,6 +915,7 @@ ftrace_init_global_array_ops(struct trace_array *tr) { }
 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
+static inline void ftrace_clear_pids(struct trace_array *tr) { }
 /* ftrace_func_t type is not defined, use macro instead of static inline */
 #define ftrace_init_array_ops(tr, func) do { } while (0)
 #endif /* CONFIG_FUNCTION_TRACER */
index 97d62c2da6c25dd5721f8c1c75264c83201f7247..fa16c0f82d6e4c159ac4b8751a1fc52631974438 100644 (file)
@@ -1103,9 +1103,6 @@ config PROVE_LOCKING
 
         For more details, see Documentation/locking/lockdep-design.txt.
 
-config PROVE_LOCKING_SMALL
-       bool
-
 config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1111,9 @@ config LOCKDEP
        select KALLSYMS
        select KALLSYMS_ALL
 
+config LOCKDEP_SMALL
+       bool
+
 config LOCK_STAT
        bool "Lock usage statistics"
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
index e68604ae3cedf41ce98bc06de2142629fa115cbd..cc001a542cb55326ba83e69805db3185d0c4301e 100644 (file)
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+       if (!unroll)
+               return;
+       i->count += unroll;
+       if (unlikely(i->type & ITER_PIPE)) {
+               struct pipe_inode_info *pipe = i->pipe;
+               int idx = i->idx;
+               size_t off = i->iov_offset;
+               while (1) {
+                       size_t n = off - pipe->bufs[idx].offset;
+                       if (unroll < n) {
+                               off -= unroll;
+                               break;
+                       }
+                       unroll -= n;
+                       if (!unroll && idx == i->start_idx) {
+                               off = 0;
+                               break;
+                       }
+                       if (!idx--)
+                               idx = pipe->buffers - 1;
+                       off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+               }
+               i->iov_offset = off;
+               i->idx = idx;
+               pipe_truncate(i);
+               return;
+       }
+       if (unroll <= i->iov_offset) {
+               i->iov_offset -= unroll;
+               return;
+       }
+       unroll -= i->iov_offset;
+       if (i->type & ITER_BVEC) {
+               const struct bio_vec *bvec = i->bvec;
+               while (1) {
+                       size_t n = (--bvec)->bv_len;
+                       i->nr_segs++;
+                       if (unroll <= n) {
+                               i->bvec = bvec;
+                               i->iov_offset = n - unroll;
+                               return;
+                       }
+                       unroll -= n;
+               }
+       } else { /* same logics for iovec and kvec */
+               const struct iovec *iov = i->iov;
+               while (1) {
+                       size_t n = (--iov)->iov_len;
+                       i->nr_segs++;
+                       if (unroll <= n) {
+                               i->iov = iov;
+                               i->iov_offset = n - unroll;
+                               return;
+                       }
+                       unroll -= n;
+               }
+       }
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
 /*
  * Return the count of just the current iov_iter segment.
  */
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
        i->iov_offset = 0;
        i->count = count;
+       i->start_idx = i->idx;
 }
 EXPORT_SYMBOL(iov_iter_pipe);
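
iov_iter_revert() gives copy helpers a way to undo partial progress, which the datagram fault paths below rely on. A minimal usage sketch, assuming a caller that must leave the iterator where it found it on failure:

	/* sketch: rewind a short copy so the caller can retry cleanly */
	size_t done = copy_to_iter(buf, len, iter);
	if (done != len) {
		iov_iter_revert(iter, done);	/* undo the partial advance */
		return -EFAULT;
	}
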
 
index fef4cf210cc7f0df1889a01532bb32215d154e60..f3c4f9d22821f889104340332eee93c5e124df4d 100644 (file)
@@ -1568,8 +1568,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                deactivate_page(page);
 
        if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
-               orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
-                       tlb->fullmm);
+               pmdp_invalidate(vma, addr, pmd);
                orig_pmd = pmd_mkold(orig_pmd);
                orig_pmd = pmd_mkclean(orig_pmd);
 
@@ -1724,37 +1723,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
-       int ret = 0;
+       pmd_t entry;
+       bool preserve_write;
+       int ret;
 
        ptl = __pmd_trans_huge_lock(pmd, vma);
-       if (ptl) {
-               pmd_t entry;
-               bool preserve_write = prot_numa && pmd_write(*pmd);
-               ret = 1;
+       if (!ptl)
+               return 0;
 
-               /*
-                * Avoid trapping faults against the zero page. The read-only
-                * data is likely to be read-cached on the local CPU and
-                * local/remote hits to the zero page are not interesting.
-                */
-               if (prot_numa && is_huge_zero_pmd(*pmd)) {
-                       spin_unlock(ptl);
-                       return ret;
-               }
+       preserve_write = prot_numa && pmd_write(*pmd);
+       ret = 1;
 
-               if (!prot_numa || !pmd_protnone(*pmd)) {
-                       entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
-                       entry = pmd_modify(entry, newprot);
-                       if (preserve_write)
-                               entry = pmd_mk_savedwrite(entry);
-                       ret = HPAGE_PMD_NR;
-                       set_pmd_at(mm, addr, pmd, entry);
-                       BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
-                                       pmd_write(entry));
-               }
-               spin_unlock(ptl);
-       }
+       /*
+        * Avoid trapping faults against the zero page. The read-only
+        * data is likely to be read-cached on the local CPU and
+        * local/remote hits to the zero page are not interesting.
+        */
+       if (prot_numa && is_huge_zero_pmd(*pmd))
+               goto unlock;
+
+       if (prot_numa && pmd_protnone(*pmd))
+               goto unlock;
+
+       /*
+        * In the prot_numa case, we are under down_read(mmap_sem). It's critical
+        * to not clear pmd intermittently to avoid race with MADV_DONTNEED
+        * which is also under down_read(mmap_sem):
+        *
+        *      CPU0:                           CPU1:
+        *                              change_huge_pmd(prot_numa=1)
+        *                               pmdp_huge_get_and_clear_notify()
+        * madvise_dontneed()
+        *  zap_pmd_range()
+        *   pmd_trans_huge(*pmd) == 0 (without ptl)
+        *   // skip the pmd
+        *                               set_pmd_at();
+        *                               // pmd is re-established
+        *
+        * The race makes MADV_DONTNEED miss the huge pmd and fail to clear it,
+        * which may break userspace.
+        *
+        * pmdp_invalidate() is required to make sure we don't miss
+        * dirty/young flags set by hardware.
+        */
+       entry = *pmd;
+       pmdp_invalidate(vma, addr, pmd);
+
+       /*
+        * Recover dirty/young flags.  It relies on pmdp_invalidate to not
+        * corrupt them.
+        */
+       if (pmd_dirty(*pmd))
+               entry = pmd_mkdirty(entry);
+       if (pmd_young(*pmd))
+               entry = pmd_mkyoung(entry);
 
+       entry = pmd_modify(entry, newprot);
+       if (preserve_write)
+               entry = pmd_mk_savedwrite(entry);
+       ret = HPAGE_PMD_NR;
+       set_pmd_at(mm, addr, pmd, entry);
+       BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+unlock:
+       spin_unlock(ptl);
        return ret;
 }
 
index ed97c2c14fa80b47ffbf7fa22ec6d4b9b57202b1..738f1d5f83503e546960d005a034abf2dde2c0e7 100644 (file)
@@ -184,9 +184,9 @@ void putback_movable_pages(struct list_head *l)
                        unlock_page(page);
                        put_page(page);
                } else {
-                       putback_lru_page(page);
                        dec_node_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_cache(page));
+                       putback_lru_page(page);
                }
        }
 }
index f3d603cef2c0c0e5aef09540dd2f8d50da5a808c..07efbc3a86567676986105005f77c64f9f99597a 100644 (file)
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
-       unsigned long nr_scanned, flags;
+       unsigned long nr_scanned;
        bool isolated_pageblocks;
 
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        isolated_pageblocks = has_isolate_pageblock(zone);
        nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
        if (nr_scanned)
@@ -1142,7 +1142,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        trace_mm_page_pcpu_drain(page, 0, mt);
                } while (--count && --batch_free && !list_empty(list));
        }
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -1150,9 +1150,8 @@ static void free_one_page(struct zone *zone,
                                unsigned int order,
                                int migratetype)
 {
-       unsigned long nr_scanned, flags;
-       spin_lock_irqsave(&zone->lock, flags);
-       __count_vm_events(PGFREE, 1 << order);
+       unsigned long nr_scanned;
+       spin_lock(&zone->lock);
        nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
        if (nr_scanned)
                __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
                migratetype = get_pfnblock_migratetype(page, pfn);
        }
        __free_one_page(page, pfn, zone, order, migratetype);
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,6 +1239,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
+       unsigned long flags;
        int migratetype;
        unsigned long pfn = page_to_pfn(page);
 
@@ -1247,7 +1247,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
                return;
 
        migratetype = get_pfnblock_migratetype(page, pfn);
+       local_irq_save(flags);
+       __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, pfn, order, migratetype);
+       local_irq_restore(flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        int migratetype, bool cold)
 {
        int i, alloced = 0;
-       unsigned long flags;
 
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
         * pages added to the pcp list.
         */
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
        return alloced;
 }
 
@@ -2485,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
 {
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
+       unsigned long flags;
        unsigned long pfn = page_to_pfn(page);
        int migratetype;
 
-       if (in_interrupt()) {
-               __free_pages_ok(page, 0);
-               return;
-       }
-
        if (!free_pcp_prepare(page))
                return;
 
        migratetype = get_pfnblock_migratetype(page, pfn);
        set_pcppage_migratetype(page, migratetype);
-       preempt_disable();
+       local_irq_save(flags);
+       __count_vm_event(PGFREE);
 
        /*
         * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2515,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
                migratetype = MIGRATE_MOVABLE;
        }
 
-       __count_vm_event(PGFREE);
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        if (!cold)
                list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2529,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
        }
 
 out:
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 /*
@@ -2654,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 {
        struct page *page;
 
-       VM_BUG_ON(in_interrupt());
-
        do {
                if (list_empty(list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
@@ -2686,8 +2682,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
        struct list_head *list;
        bool cold = ((gfp_flags & __GFP_COLD) != 0);
        struct page *page;
+       unsigned long flags;
 
-       preempt_disable();
+       local_irq_save(flags);
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        list = &pcp->lists[migratetype];
        page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list);
@@ -2695,7 +2692,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
                __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
                zone_statistics(preferred_zone, zone);
        }
-       preempt_enable();
+       local_irq_restore(flags);
        return page;
 }
 
@@ -2711,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
        unsigned long flags;
        struct page *page;
 
-       if (likely(order == 0) && !in_interrupt()) {
+       if (likely(order == 0)) {
                page = rmqueue_pcplist(preferred_zone, zone, order,
                                gfp_flags, migratetype);
                goto out;
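
The per-cpu list fast paths now run under local_irq_save() instead of preempt_disable(), and zone->lock drops back to a plain spin_lock(): the invariant this patch arranges is that every caller of free_one_page(), free_pcppages_bulk() and rmqueue_bulk() already has interrupts disabled. A condensed sketch of that layering, mirroring __free_pages_ok() above:

	local_irq_save(flags);			/* caller disables IRQs... */
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(zone, page, pfn, order, migratetype);
	/* ...so free_one_page() can take zone->lock without irqsave */
	local_irq_restore(flags);
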
index 809025ed97ea0eee97573a32ba2764c63ee2dffd..5a4f5c5a31e88ee558f536d22f61f05a3fd13c45 100644 (file)
@@ -1768,8 +1768,7 @@ void __init init_mm_internals(void)
 {
        int ret __maybe_unused;
 
-       mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
-                                      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+       mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
 
 #ifdef CONFIG_SMP
        ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
index f9492bccfd794a1983eabbc4bff32df35b31cea8..54f63c4a809ae123248200ee84642629c6db8ffc 100644 (file)
@@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr)
        spin_lock(&zhdr->page_lock);
 }
 
+/* Try to lock a z3fold page */
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
+{
+       return spin_trylock(&zhdr->page_lock);
+}
+
 /* Unlock a z3fold page */
 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
 {
@@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        spin_lock(&pool->lock);
                        zhdr = list_first_entry_or_null(&pool->unbuddied[i],
                                                struct z3fold_header, buddy);
-                       if (!zhdr) {
+                       if (!zhdr || !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                continue;
                        }
@@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        spin_unlock(&pool->lock);
 
                        page = virt_to_page(zhdr);
-                       z3fold_page_lock(zhdr);
                        if (zhdr->first_chunks == 0) {
                                if (zhdr->middle_chunks != 0 &&
                                    chunks >= zhdr->start_middle)
index b7ee9c34dbd678fc984db3295a70c117ee2fc2f2..d41edd28298b68ff335e6324df7e4e793a481d93 100644 (file)
@@ -276,7 +276,7 @@ struct zs_pool {
 struct zspage {
        struct {
                unsigned int fullness:FULLNESS_BITS;
-               unsigned int class:CLASS_BITS;
+               unsigned int class:CLASS_BITS + 1;
                unsigned int isolated:ISOLATED_BITS;
                unsigned int magic:MAGIC_VAL_BITS;
        };
index 3ce672af1596cfdb8fbd67ce558366e7997b7695..8e5c6a8d0a3733d03a9c36ca8854f828cb8cc678 100644 (file)
@@ -2101,6 +2101,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
                trace_9p_protocol_dump(clnt, req->rc);
                goto free_and_error;
        }
+       if (rsize < count) {
+               pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
+               count = rsize;
+       }
 
        p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
 
index ea71513fca21a0aea0dd569b482717a1b1dbe673..430b53e7d941def09220a1c97a2e82d288304595 100644 (file)
@@ -119,6 +119,16 @@ static int br_dev_init(struct net_device *dev)
        return err;
 }
 
+static void br_dev_uninit(struct net_device *dev)
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       br_multicast_dev_del(br);
+       br_multicast_uninit_stats(br);
+       br_vlan_flush(br);
+       free_percpu(br->stats);
+}
+
 static int br_dev_open(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -332,6 +342,7 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_open                = br_dev_open,
        .ndo_stop                = br_dev_stop,
        .ndo_init                = br_dev_init,
+       .ndo_uninit              = br_dev_uninit,
        .ndo_start_xmit          = br_dev_xmit,
        .ndo_get_stats64         = br_get_stats64,
        .ndo_set_mac_address     = br_set_mac_address,
@@ -356,14 +367,6 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_features_check      = passthru_features_check,
 };
 
-static void br_dev_free(struct net_device *dev)
-{
-       struct net_bridge *br = netdev_priv(dev);
-
-       free_percpu(br->stats);
-       free_netdev(dev);
-}
-
 static struct device_type br_type = {
        .name   = "bridge",
 };
@@ -376,7 +379,7 @@ void br_dev_setup(struct net_device *dev)
        ether_setup(dev);
 
        dev->netdev_ops = &br_netdev_ops;
-       dev->destructor = br_dev_free;
+       dev->destructor = free_netdev;
        dev->ethtool_ops = &br_ethtool_ops;
        SET_NETDEV_DEVTYPE(dev, &br_type);
        dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
index 8ac1770aa222f21f89027d303a218c49be9dc650..a8d0ed282a109a1f4075565a0934e742febb387b 100644 (file)
@@ -311,8 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
 
        br_fdb_delete_by_port(br, NULL, 0, 1);
 
-       br_vlan_flush(br);
-       br_multicast_dev_del(br);
        cancel_delayed_work_sync(&br->gc_work);
 
        br_sysfs_delbr(br->dev);
index b760f2620abf320307a65c3f5baf86ff91221545..faa7261a992fa6df54afd4656b0e810102d59c30 100644 (file)
@@ -2031,8 +2031,6 @@ void br_multicast_dev_del(struct net_bridge *br)
 
 out:
        spin_unlock_bh(&br->multicast_lock);
-
-       free_percpu(br->mcast_stats);
 }
 
 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
@@ -2531,6 +2529,11 @@ int br_multicast_init_stats(struct net_bridge *br)
        return 0;
 }
 
+void br_multicast_uninit_stats(struct net_bridge *br)
+{
+       free_percpu(br->mcast_stats);
+}
+
 static void mcast_stats_add_dir(u64 *dst, u64 *src)
 {
        dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
index a8f6acd23e309dcf51e6825076d9b3ba00996a9c..225ef7d5370166baff69080996cf64ff68b055f6 100644 (file)
@@ -1165,11 +1165,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
                spin_unlock_bh(&br->lock);
        }
 
-       err = br_changelink(dev, tb, data);
+       err = register_netdevice(dev);
        if (err)
                return err;
 
-       return register_netdevice(dev);
+       err = br_changelink(dev, tb, data);
+       if (err)
+               unregister_netdevice(dev);
+       return err;
 }
 
 static size_t br_get_size(const struct net_device *brdev)
index 61368186edea53841b1f00b37ddaa0d26461aee3..0d177280aa849bf1bc3ba3de79d3a3f3c748d5eb 100644 (file)
@@ -620,6 +620,7 @@ void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
                        const struct sk_buff *skb, u8 type, u8 dir);
 int br_multicast_init_stats(struct net_bridge *br);
+void br_multicast_uninit_stats(struct net_bridge *br);
 void br_multicast_get_stats(const struct net_bridge *br,
                            const struct net_bridge_port *p,
                            struct br_mcast_stats *dest);
@@ -760,6 +761,10 @@ static inline int br_multicast_init_stats(struct net_bridge *br)
        return 0;
 }
 
+static inline void br_multicast_uninit_stats(struct net_bridge *br)
+{
+}
+
 static inline int br_multicast_igmp_type(const struct sk_buff *skb)
 {
        return 0;
index ea633342ab0d046cbc49e55b679440ef9e015c2d..f4947e737f34a0d5dafb2e8232260f46a43fbc27 100644 (file)
@@ -398,7 +398,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
                           struct iov_iter *to, int len)
 {
        int start = skb_headlen(skb);
-       int i, copy = start - offset;
+       int i, copy = start - offset, start_off = offset, n;
        struct sk_buff *frag_iter;
 
        trace_skb_copy_datagram_iovec(skb, len);
@@ -407,11 +407,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
        if (copy > 0) {
                if (copy > len)
                        copy = len;
-               if (copy_to_iter(skb->data + offset, copy, to) != copy)
+               n = copy_to_iter(skb->data + offset, copy, to);
+               offset += n;
+               if (n != copy)
                        goto short_copy;
                if ((len -= copy) == 0)
                        return 0;
-               offset += copy;
        }
 
        /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -425,13 +426,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
-                       if (copy_page_to_iter(skb_frag_page(frag),
+                       n = copy_page_to_iter(skb_frag_page(frag),
                                              frag->page_offset + offset -
-                                             start, copy, to) != copy)
+                                             start, copy, to);
+                       offset += n;
+                       if (n != copy)
                                goto short_copy;
                        if (!(len -= copy))
                                return 0;
-                       offset += copy;
                }
                start = end;
        }
@@ -463,6 +465,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
         */
 
 fault:
+       iov_iter_revert(to, offset - start_off);
        return -EFAULT;
 
 short_copy:
@@ -613,7 +616,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      __wsum *csump)
 {
        int start = skb_headlen(skb);
-       int i, copy = start - offset;
+       int i, copy = start - offset, start_off = offset;
        struct sk_buff *frag_iter;
        int pos = 0;
        int n;
@@ -623,11 +626,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                if (copy > len)
                        copy = len;
                n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
+               offset += n;
                if (n != copy)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
-               offset += copy;
                pos = copy;
        }
 
@@ -649,12 +652,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                                  offset - start, copy,
                                                  &csum2, to);
                        kunmap(page);
+                       offset += n;
                        if (n != copy)
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if (!(len -= copy))
                                return 0;
-                       offset += copy;
                        pos += copy;
                }
                start = end;
@@ -687,6 +690,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                return 0;
 
 fault:
+       iov_iter_revert(to, offset - start_off);
        return -EFAULT;
 }
 
@@ -771,6 +775,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
        }
        return 0;
 csum_error:
+       iov_iter_revert(&msg->msg_iter, chunk);
        return -EINVAL;
 fault:
        return -EFAULT;
index 7869ae3837ca741e344b1731dc50d8408d8bcb6c..9b5875388c23c4f3306124697fd291c40fb6e6cd 100644 (file)
@@ -2450,6 +2450,9 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 {
        unsigned long flags;
 
+       if (unlikely(!skb))
+               return;
+
        if (likely(atomic_read(&skb->users) == 1)) {
                smp_rmb();
                atomic_set(&skb->users, 0);
@@ -6757,7 +6760,6 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
 
        return err;
 }
-EXPORT_SYMBOL(dev_change_xdp_fd);
 
 /**
  *     dev_new_index   -       allocate an ifindex
index 9424673009c14e0fb288b8e4041dba596b37ee8d..29be2466970cd670daa7a8abdd54929c9af39026 100644 (file)
@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                struct netdev_queue *txq;
+               unsigned int q_index;
 
                if (!netif_device_present(dev) || !netif_running(dev)) {
                        kfree_skb(skb);
                        continue;
                }
 
-               txq = skb_get_tx_queue(dev, skb);
-
                local_irq_save(flags);
+               /* check if skb->queue_mapping is still valid */
+               q_index = skb_get_queue_mapping(skb);
+               if (unlikely(q_index >= dev->real_num_tx_queues)) {
+                       q_index = q_index % dev->real_num_tx_queues;
+                       skb_set_queue_mapping(skb, q_index);
+               }
+               txq = netdev_get_tx_queue(dev, q_index);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
                    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
index 9f781092fda9cb8cac22b0743b4bc7666a3bd91a..f1d04592ace02f32efa6e05df89c9a5e0023157f 100644 (file)
@@ -1576,6 +1576,8 @@ done:
                skb_set_tail_pointer(skb, len);
        }
 
+       if (!skb->sk || skb->destructor == sock_edemux)
+               skb_condense(skb);
        return 0;
 }
 EXPORT_SYMBOL(___pskb_trim);
@@ -3082,22 +3084,32 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        if (sg && csum && (mss != GSO_BY_FRAGS))  {
                if (!(features & NETIF_F_GSO_PARTIAL)) {
                        struct sk_buff *iter;
+                       unsigned int frag_len;
 
                        if (!list_skb ||
                            !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
                                goto normal;
 
-                       /* Split the buffer at the frag_list pointer.
-                        * This is based on the assumption that all
-                        * buffers in the chain excluding the last
-                        * containing the same amount of data.
+                       /* If we get here then all the required
+                        * GSO features except frag_list are supported.
+                        * Try to split the SKB into multiple GSO SKBs
+                        * with no frag_list.
+                        * Currently we can do that only when the buffers don't
+                        * have a linear part and all the buffers except
+                        * the last are of the same length.
                         */
+                       frag_len = list_skb->len;
                        skb_walk_frags(head_skb, iter) {
+                               if (frag_len != iter->len && iter->next)
+                                       goto normal;
                                if (skb_headlen(iter))
                                        goto normal;
 
                                len -= iter->len;
                        }
+
+                       if (len != frag_len)
+                               goto normal;
                }
 
                /* GSO partial only requires that we trim off any excess that
@@ -3807,6 +3819,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
        serr->ee.ee_info = tstype;
        serr->opt_stats = opt_stats;
+       serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
        if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
                serr->ee.ee_data = skb_shinfo(skb)->tskey;
                if (sk->sk_protocol == IPPROTO_TCP &&
index 6b1fc6e4278ef4f1cba58412977918af31d73e62..13a9a3297eae3ac48a77214e9365657202d44f08 100644 (file)
@@ -1343,6 +1343,9 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        if (*(u8 *)iph != 0x45)
                goto out_unlock;
 
+       if (ip_is_fragment(iph))
+               goto out_unlock;
+
        if (unlikely(ip_fast_csum((u8 *)iph, 5)))
                goto out_unlock;
 
index ebd953bc5607f3b25fffddcb26a5c65e5490cb2b..1d46d05efb0ff067c35750ac43be8e7babd60446 100644 (file)
@@ -488,16 +488,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
                return false;
 
        /* Support IP_PKTINFO on tstamp packets if requested, to correlate
-        * timestamp with egress dev. Not possible for packets without dev
+        * timestamp with egress dev. Not possible for packets without iif
         * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
         */
-       if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
-           (!skb->dev))
+       info = PKTINFO_SKB_CB(skb);
+       if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
+           !info->ipi_ifindex)
                return false;
 
-       info = PKTINFO_SKB_CB(skb);
        info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
-       info->ipi_ifindex = skb->dev->ifindex;
        return true;
 }
 
@@ -591,6 +590,7 @@ static bool setsockopt_needs_rtnl(int optname)
        case MCAST_LEAVE_GROUP:
        case MCAST_LEAVE_SOURCE_GROUP:
        case MCAST_UNBLOCK_SOURCE:
+       case IP_ROUTER_ALERT:
                return true;
        }
        return false;
index c0317c940bcdc303015f500b52198e0862440e17..b036e85e093b3e97cee1b0dbffc8d1dfeb6a2b72 100644 (file)
@@ -1278,7 +1278,7 @@ static void mrtsock_destruct(struct sock *sk)
        struct net *net = sock_net(sk);
        struct mr_table *mrt;
 
-       rtnl_lock();
+       ASSERT_RTNL();
        ipmr_for_each_table(mrt, net) {
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
@@ -1289,7 +1289,6 @@ static void mrtsock_destruct(struct sock *sk)
                        mroute_clean_tables(mrt, false);
                }
        }
-       rtnl_unlock();
 }
 
 /* Socket options and virtual interface manipulation. The whole
@@ -1353,13 +1352,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                if (sk != rcu_access_pointer(mrt->mroute_sk)) {
                        ret = -EACCES;
                } else {
-                       /* We need to unlock here because mrtsock_destruct takes
-                        * care of rtnl itself and we can't change that due to
-                        * the IP_ROUTER_ALERT setsockopt which runs without it.
-                        */
-                       rtnl_unlock();
                        ret = ip_ra_control(sk, 0, NULL);
-                       goto out;
+                       goto out_unlock;
                }
                break;
        case MRT_ADD_VIF:
@@ -1470,7 +1464,6 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
        }
 out_unlock:
        rtnl_unlock();
-out:
        return ret;
 }
 
index 52f26459efc345a8a0c00d356306fb5fd398547e..9b8841316e7b94e375cc52d0dfd7f9fe89205195 100644 (file)
@@ -461,7 +461,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
 
        clusterip_config_put(cipinfo->config);
 
-       nf_ct_netns_get(par->net, par->family);
+       nf_ct_netns_put(par->net, par->family);
 }
 
 #ifdef CONFIG_COMPAT
index 8119e1f66e036ad2a8372bf24dd943c7d9631d8e..9d943974de2b6d91c56b2ae2dee0019883f8f3cf 100644 (file)
@@ -682,7 +682,9 @@ static void raw_close(struct sock *sk, long timeout)
        /*
         * Raw sockets may have direct kernel references. Kill them.
         */
+       rtnl_lock();
        ip_ra_control(sk, 0, NULL);
+       rtnl_unlock();
 
        sk_common_release(sk);
 }
index 8471dd116771462d149e1da2807e446b69b74bcc..d9724889ff09077aa88c98ec3e170dcfdb91d29b 100644 (file)
@@ -2359,7 +2359,8 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
                }
 
                /* L3 master device is the loopback for that domain */
-               dev_out = l3mdev_master_dev_rcu(dev_out) ? : net->loopback_dev;
+               dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? :
+                       net->loopback_dev;
                fl4->flowi4_oif = dev_out->ifindex;
                flags |= RTCF_LOCAL;
                goto make_route;
@@ -2620,7 +2621,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        skb_reset_network_header(skb);
 
        /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
-       ip_hdr(skb)->protocol = IPPROTO_ICMP;
+       ip_hdr(skb)->protocol = IPPROTO_UDP;
        skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
 
        src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
index 1e319a525d51b0b603a5ccc5143381c752b9f2c7..40ba4249a58677671b68bf495f32f15b7c5f62d7 100644 (file)
@@ -2322,6 +2322,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_init_send_head(sk);
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
+       tcp_saved_syn_free(tp);
 
        /* Clean up fastopen related fields */
        tcp_free_fastopen_req(tp);
index 79c4817abc94d08265edb2dfa995e3e479148a16..6e3c512054a60715e8e2d16ffedd12cba6a3d2d9 100644 (file)
@@ -168,12 +168,8 @@ void tcp_assign_congestion_control(struct sock *sk)
        }
 out:
        rcu_read_unlock();
+       memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-       /* Clear out private data before diag gets it and
-        * the ca has not been initialized.
-        */
-       if (ca->get_info)
-               memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
        if (ca->flags & TCP_CONG_NEEDS_ECN)
                INET_ECN_xmit(sk);
        else
@@ -200,11 +196,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
        tcp_cleanup_congestion_control(sk);
        icsk->icsk_ca_ops = ca;
        icsk->icsk_ca_setsockopt = 1;
+       memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-       if (sk->sk_state != TCP_CLOSE) {
-               memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+       if (sk->sk_state != TCP_CLOSE)
                tcp_init_congestion_control(sk);
-       }
 }
 
 /* Manage refcounts on socket close. */
index 2c1f59386a7bac8a8d9034ad8c64ba14d877fed2..659d1baefb2bba36d96e412eb7ca5a02996fb6dd 100644 (file)
@@ -1935,6 +1935,7 @@ void tcp_enter_loss(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct sk_buff *skb;
+       bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
        bool is_reneg;                  /* is receiver reneging on SACKs? */
        bool mark_lost;
 
@@ -1994,15 +1995,18 @@ void tcp_enter_loss(struct sock *sk)
        tp->high_seq = tp->snd_nxt;
        tcp_ecn_queue_cwr(tp);
 
-       /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO
-        * if a previous recovery is underway, otherwise it may incorrectly
-        * call a timeout spurious if some previously retransmitted packets
-        * are s/acked (sec 3.2). We do not apply that retriction since
-        * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS
-        * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO
-        * on PTMU discovery to avoid sending new data.
+       /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+        * loss recovery is underway except recurring timeout(s) on
+        * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing.
+        *
+        * In theory F-RTO can be used repeatedly during loss recovery.
+        * In practice this interacts badly with broken middle-boxes that
+        * falsely raise the receive window, which results in repeated
+        * timeouts and stop-and-go behavior.
         */
-       tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size;
+       tp->frto = sysctl_tcp_frto &&
+                  (new_recovery || icsk->icsk_retransmits) &&
+                  !inet_csk(sk)->icsk_mtup.probe_size;
 }
 
 /* If ACK arrived pointing to a remembered SACK, it means that our
index 22548b5f05cbe5a655e0c53df2d31c5cc2e8a702..a85d863c44196e60fd22e25471cf773e72d2c133 100644 (file)
@@ -1267,7 +1267,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
  * eventually). The difference is that pulled data is not copied, but
  * immediately discarded.
  */
-static void __pskb_trim_head(struct sk_buff *skb, int len)
+static int __pskb_trim_head(struct sk_buff *skb, int len)
 {
        struct skb_shared_info *shinfo;
        int i, k, eat;
@@ -1277,7 +1277,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
                __skb_pull(skb, eat);
                len -= eat;
                if (!len)
-                       return;
+                       return 0;
        }
        eat = len;
        k = 0;
@@ -1303,23 +1303,28 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
        skb_reset_tail_pointer(skb);
        skb->data_len -= len;
        skb->len = skb->data_len;
+       return len;
 }
 
 /* Remove acked data from a packet in the transmit queue. */
 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 {
+       u32 delta_truesize;
+
        if (skb_unclone(skb, GFP_ATOMIC))
                return -ENOMEM;
 
-       __pskb_trim_head(skb, len);
+       delta_truesize = __pskb_trim_head(skb, len);
 
        TCP_SKB_CB(skb)->seq += len;
        skb->ip_summed = CHECKSUM_PARTIAL;
 
-       skb->truesize        -= len;
-       sk->sk_wmem_queued   -= len;
-       sk_mem_uncharge(sk, len);
-       sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+       if (delta_truesize) {
+               skb->truesize      -= delta_truesize;
+               sk->sk_wmem_queued -= delta_truesize;
+               sk_mem_uncharge(sk, delta_truesize);
+               sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+       }
 
        /* Any change of skb->len requires recalculation of tso factor. */
        if (tcp_skb_pcount(skb) > 1)
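The accounting change hinges on where the trimmed bytes lived: bytes pulled off the linear header area by __skb_pull() free no memory, so only what __pskb_trim_head() actually releases from the paged frags may be subtracted from skb->truesize and the socket's write-queue charge. Subtracting the full len, as before, over-released sk_wmem_queued. Condensed, with the names above:

    delta_truesize = __pskb_trim_head(skb, len); /* 0 if purely linear */
    TCP_SKB_CB(skb)->seq += len;                 /* sequence always advances */
    if (delta_truesize) {                        /* uncharge only freed bytes */
            skb->truesize      -= delta_truesize;
            sk->sk_wmem_queued -= delta_truesize;
            sk_mem_uncharge(sk, delta_truesize);
            sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
    }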
@@ -2999,6 +3004,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
        struct sk_buff *skb;
 
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
+
        /* NOTE: No TCP options attached and we never retransmit this. */
        skb = alloc_skb(MAX_TCP_HEADER, priority);
        if (!skb) {
@@ -3014,8 +3021,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
        /* Send it off. */
        if (tcp_transmit_skb(sk, skb, 0, priority))
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
-
-       TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
 }
 
 /* Send a crossed SYN-ACK during socket establishment.
index b2be1d9757efb8ce8b82dc0a0fe3a475d193ea5b..781250151d40ee4559f7b90d15dccad8ffaeafd0 100644 (file)
@@ -29,6 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
        u16 mac_len = skb->mac_len;
        int udp_offset, outer_hlen;
        __wsum partial;
+       bool need_ipsec;
 
        if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
                goto out;
@@ -62,8 +63,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 
        ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
 
+       need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
        /* Try to offload checksum if possible */
        offload_csum = !!(need_csum &&
+                         !need_ipsec &&
                          (skb->dev->features &
                           (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
                                      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
index 363172527e433e321cfa9fe8e96cfe32e4a78043..0ea96c4d334da2821a8d9c0e5e7d0d513dcb4228 100644 (file)
@@ -3271,14 +3271,24 @@ static void addrconf_gre_config(struct net_device *dev)
 static int fixup_permanent_addr(struct inet6_dev *idev,
                                struct inet6_ifaddr *ifp)
 {
-       if (!ifp->rt) {
-               struct rt6_info *rt;
+       /* rt6i_ref == 0 means the host route was removed from the
+        * FIB, for example, if the 'lo' device is taken down. In that
+        * case, regenerate the host route.
+        */
+       if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
+               struct rt6_info *rt, *prev;
 
                rt = addrconf_dst_alloc(idev, &ifp->addr, false);
                if (unlikely(IS_ERR(rt)))
                        return PTR_ERR(rt);
 
+               /* ifp->rt can be accessed outside of rtnl */
+               spin_lock(&ifp->lock);
+               prev = ifp->rt;
                ifp->rt = rt;
+               spin_unlock(&ifp->lock);
+
+               ip6_rt_put(prev);
        }
 
        if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
@@ -3626,14 +3636,19 @@ restart:
        INIT_LIST_HEAD(&del_list);
        list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
                struct rt6_info *rt = NULL;
+               bool keep;
 
                addrconf_del_dad_work(ifa);
 
+               keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+                       !addr_is_local(&ifa->addr);
+               if (!keep)
+                       list_move(&ifa->if_list, &del_list);
+
                write_unlock_bh(&idev->lock);
                spin_lock_bh(&ifa->lock);
 
-               if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
-                   !addr_is_local(&ifa->addr)) {
+               if (keep) {
                        /* set state to skip the notifier below */
                        state = INET6_IFADDR_STATE_DEAD;
                        ifa->state = 0;
@@ -3645,8 +3660,6 @@ restart:
                } else {
                        state = ifa->state;
                        ifa->state = INET6_IFADDR_STATE_DEAD;
-
-                       list_move(&ifa->if_list, &del_list);
                }
 
                spin_unlock_bh(&ifa->lock);
index a9a9553ee63df8eb6e16e00d5da8c29406435350..e82e59f22dfc0e8eabe6b8dd3e12f5c25533142b 100644 (file)
@@ -933,8 +933,6 @@ static int __init inet6_init(void)
        if (err)
                goto igmp_fail;
 
-       ipv6_stub = &ipv6_stub_impl;
-
        err = ipv6_netfilter_init();
        if (err)
                goto netfilter_fail;
@@ -1010,6 +1008,10 @@ static int __init inet6_init(void)
        if (err)
                goto sysctl_fail;
 #endif
+
+       /* ensure that ipv6 stubs are visible only after ipv6 is ready */
+       wmb();
+       ipv6_stub = &ipv6_stub_impl;
 out:
        return err;
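Moving the assignment to the end, behind wmb(), is the init-then-publish idiom: every store initializing the IPv6 stack must be visible before any CPU can observe the non-NULL stub pointer, or a reader could call through half-initialized state. A minimal sketch of the same ordering in store-release/load-acquire form, with hypothetical names:

    static struct my_stub stub_impl;          /* hypothetical */
    static struct my_stub *published;         /* NULL until ready */

    void publisher(void)
    {
            /* ... fully initialize stub_impl ... */
            smp_store_release(&published, &stub_impl); /* implies the wmb() */
    }

    struct my_stub *reader(void)
    {
            return smp_load_acquire(&published);       /* pairs with release */
    }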
 
index eec27f87efaca15133cf1d5225e37e6a2f6a6f8a..e011122ebd43c190aec3812099345ec852444284 100644 (file)
@@ -405,9 +405,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
  * At one point, excluding local errors was a quick test to identify icmp/icmp6
  * errors. This is no longer true, but the test remained, so the v6 stack,
  * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
- *
- * Timestamp code paths do not initialize the fields expected by cmsg:
- * the PKTINFO fields in skb->cb[]. Fill those in here.
  */
 static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
                                      struct sock_exterr_skb *serr)
@@ -419,14 +416,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
        if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
                return false;
 
-       if (!skb->dev)
+       if (!IP6CB(skb)->iif)
                return false;
 
-       if (skb->protocol == htons(ETH_P_IPV6))
-               IP6CB(skb)->iif = skb->dev->ifindex;
-       else
-               PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
-
        return true;
 }
 
index 275cac628a95066f0a27e93f5015ddeb0172c28c..d32e2110aff286cf6c911048e96fc3abf6e10779 100644 (file)
@@ -388,7 +388,6 @@ looped_back:
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((&hdr->segments_left) -
                                   skb_network_header(skb)));
-               kfree_skb(skb);
                return -1;
        }
 
@@ -910,6 +909,8 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
 {
        switch (opt->type) {
        case IPV6_SRCRT_TYPE_0:
+       case IPV6_SRCRT_STRICT:
+       case IPV6_SRCRT_TYPE_2:
                ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
                break;
        case IPV6_SRCRT_TYPE_4:
@@ -1164,6 +1165,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
 
        switch (opt->srcrt->type) {
        case IPV6_SRCRT_TYPE_0:
+       case IPV6_SRCRT_STRICT:
+       case IPV6_SRCRT_TYPE_2:
                fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
                break;
        case IPV6_SRCRT_TYPE_4:
index aacfb4bce1533b3f3b38e1173c18cb1bb6b33099..c45b12b4431cbfcaef1f8452bae871bb176be478 100644 (file)
@@ -122,11 +122,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
                        max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
        /*
         * RFC4291 2.5.3
+        * The loopback address must not be used as the source address in IPv6
+        * packets that are sent outside of a single node. [..]
         * A packet received on an interface with a destination address
         * of loopback must be dropped.
         */
-       if (!(dev->flags & IFF_LOOPBACK) &&
-           ipv6_addr_loopback(&hdr->daddr))
+       if ((ipv6_addr_loopback(&hdr->saddr) ||
+            ipv6_addr_loopback(&hdr->daddr)) &&
+            !(dev->flags & IFF_LOOPBACK))
                goto err;
 
        /* RFC4291 Errata ID: 3480
index 75fac933c209a0f430279dea10b5dd2426a7ed31..a9692ec0cd6d0ba9fecba143d16191e8df0d9572 100644 (file)
@@ -1037,7 +1037,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = t->net;
        struct net_device_stats *stats = &t->dev->stats;
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct ipv6hdr *ipv6h;
        struct ipv6_tel_txoption opt;
        struct dst_entry *dst = NULL, *ndst = NULL;
        struct net_device *tdev;
@@ -1057,26 +1057,28 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
        /* NBMA tunnel */
        if (ipv6_addr_any(&t->parms.raddr)) {
-               struct in6_addr *addr6;
-               struct neighbour *neigh;
-               int addr_type;
+               if (skb->protocol == htons(ETH_P_IPV6)) {
+                       struct in6_addr *addr6;
+                       struct neighbour *neigh;
+                       int addr_type;
 
-               if (!skb_dst(skb))
-                       goto tx_err_link_failure;
+                       if (!skb_dst(skb))
+                               goto tx_err_link_failure;
 
-               neigh = dst_neigh_lookup(skb_dst(skb),
-                                        &ipv6_hdr(skb)->daddr);
-               if (!neigh)
-                       goto tx_err_link_failure;
+                       neigh = dst_neigh_lookup(skb_dst(skb),
+                                                &ipv6_hdr(skb)->daddr);
+                       if (!neigh)
+                               goto tx_err_link_failure;
 
-               addr6 = (struct in6_addr *)&neigh->primary_key;
-               addr_type = ipv6_addr_type(addr6);
+                       addr6 = (struct in6_addr *)&neigh->primary_key;
+                       addr_type = ipv6_addr_type(addr6);
 
-               if (addr_type == IPV6_ADDR_ANY)
-                       addr6 = &ipv6_hdr(skb)->daddr;
+                       if (addr_type == IPV6_ADDR_ANY)
+                               addr6 = &ipv6_hdr(skb)->daddr;
 
-               memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
-               neigh_release(neigh);
+                       memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+                       neigh_release(neigh);
+               }
        } else if (!(t->parms.flags &
                     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
                /* enable the cache only if the routing decision does
index 6ba6c900ebcf430cf313a2bef55ff69c114af218..bf34d0950752ba1466fa806fd4f8ce0b802b0087 100644 (file)
@@ -774,7 +774,8 @@ failure:
  *     Delete a VIF entry
  */
 
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+                      struct list_head *head)
 {
        struct mif_device *v;
        struct net_device *dev;
@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
                                             dev->ifindex, &in6_dev->cnf);
        }
 
-       if (v->flags & MIFF_REGISTER)
+       if ((v->flags & MIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);
 
        dev_put(dev);
@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
        struct mr6_table *mrt;
        struct mif_device *v;
        int ct;
-       LIST_HEAD(list);
 
        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
                v = &mrt->vif6_table[0];
                for (ct = 0; ct < mrt->maxvif; ct++, v++) {
                        if (v->dev == dev)
-                               mif6_delete(mrt, ct, &list);
+                               mif6_delete(mrt, ct, 1, NULL);
                }
        }
-       unregister_netdevice_many(&list);
 
        return NOTIFY_DONE;
 }
@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
        for (i = 0; i < mrt->maxvif; i++) {
                if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
                        continue;
-               mif6_delete(mrt, i, &list);
+               mif6_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);
 
@@ -1707,7 +1706,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
                if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
                        return -EFAULT;
                rtnl_lock();
-               ret = mif6_delete(mrt, mifi, NULL);
+               ret = mif6_delete(mrt, mifi, 0, NULL);
                rtnl_unlock();
                return ret;
 
index 7ebac630d3c603186be2fc0dcbaac7d7e74bfde6..cb1766724a4ca12ec9ccbc452c776261477c99f1 100644 (file)
@@ -1749,7 +1749,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
                idev = in6_dev_get(dev);
                if (!idev)
                        break;
-               if (idev->cnf.ndisc_notify)
+               if (idev->cnf.ndisc_notify ||
+                   net->ipv6.devconf_all->ndisc_notify)
                        ndisc_send_unsol_na(dev);
                in6_dev_put(idev);
                break;
index f174e76e6505d4045e940c9fceef765d2aaa937d..0da6a12b5472e322d679572c7244e5c9bc467741 100644 (file)
@@ -1178,8 +1178,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb)
-                       amount = skb_tail_pointer(skb) -
-                               skb_transport_header(skb);
+                       amount = skb->len;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
        }
index 9db1418993f2b8a5b4194895f243441033d4729a..fb174b590fd3b443a7503207d822dd02e7171290 100644 (file)
@@ -1854,6 +1854,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
        int addr_type;
        int err = -EINVAL;
 
+       /* RTF_PCPU is an internal flag; cannot be set by userspace */
+       if (cfg->fc_flags & RTF_PCPU)
+               goto out;
+
        if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
                goto out;
 #ifndef CONFIG_IPV6_SUBTREES
index a855eb325b030a666fe92c56a2d432c77d9dfe7a..5f44ffed25768d83c31b31295474c5ecf623e986 100644 (file)
@@ -53,6 +53,9 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
                struct sr6_tlv *tlv;
                unsigned int tlv_len;
 
+               if (trailing < sizeof(*tlv))
+                       return false;
+
                tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset);
                tlv_len = sizeof(*tlv) + tlv->len;
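Without the added guard, the walk reads tlv->len from a header that may extend past the buffer: trailing can drop below sizeof(*tlv) while the header is still dereferenced. A generic sketch of the bounds-checked TLV walk (standalone, hypothetical names):

    #include <stdbool.h>
    #include <stddef.h>

    struct tlv {
            unsigned char type;
            unsigned char len;  /* bytes of value following the header */
    };

    static bool tlvs_fit(const unsigned char *buf, size_t trailing)
    {
            size_t off = 0;

            while (trailing) {
                    const struct tlv *t;
                    size_t tlv_len;

                    if (trailing < sizeof(*t))      /* header must fit... */
                            return false;
                    t = (const struct tlv *)(buf + off);
                    tlv_len = sizeof(*t) + t->len;  /* ...before reading len */
                    if (tlv_len > trailing)
                            return false;
                    off += tlv_len;
                    trailing -= tlv_len;
            }
            return true;
    }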
 
index c6252ed42c1de65dee149d7d869b62b96616e22a..be8cecc6500214de68cc8872b48b38c840d3304f 100644 (file)
@@ -63,8 +63,13 @@ struct pfkey_sock {
                } u;
                struct sk_buff  *skb;
        } dump;
+       struct mutex dump_lock;
 };
 
+static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+                              xfrm_address_t *saddr, xfrm_address_t *daddr,
+                              u16 *family);
+
 static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
 {
        return (struct pfkey_sock *)sk;
@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
 {
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
        struct sock *sk;
+       struct pfkey_sock *pfk;
        int err;
 
        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
        if (sk == NULL)
                goto out;
 
+       pfk = pfkey_sk(sk);
+       mutex_init(&pfk->dump_lock);
+
        sock->ops = &pfkey_ops;
        sock_init_data(sock, sk);
 
@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
        struct sadb_msg *hdr;
        int rc;
 
+       mutex_lock(&pfk->dump_lock);
+       if (!pfk->dump.dump) {
+               rc = 0;
+               goto out;
+       }
+
        rc = pfk->dump.dump(pfk);
-       if (rc == -ENOBUFS)
-               return 0;
+       if (rc == -ENOBUFS) {
+               rc = 0;
+               goto out;
+       }
 
        if (pfk->dump.skb) {
-               if (!pfkey_can_dump(&pfk->sk))
-                       return 0;
+               if (!pfkey_can_dump(&pfk->sk)) {
+                       rc = 0;
+                       goto out;
+               }
 
                hdr = (struct sadb_msg *) pfk->dump.skb->data;
                hdr->sadb_msg_seq = 0;
@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
        }
 
        pfkey_terminate_dump(pfk);
+
+out:
+       mutex_unlock(&pfk->dump_lock);
        return rc;
 }
 
@@ -1793,19 +1815,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
        struct xfrm_address_filter *filter = NULL;
        struct pfkey_sock *pfk = pfkey_sk(sk);
 
-       if (pfk->dump.dump != NULL)
+       mutex_lock(&pfk->dump_lock);
+       if (pfk->dump.dump != NULL) {
+               mutex_unlock(&pfk->dump_lock);
                return -EBUSY;
+       }
 
        proto = pfkey_satype2proto(hdr->sadb_msg_satype);
-       if (proto == 0)
+       if (proto == 0) {
+               mutex_unlock(&pfk->dump_lock);
                return -EINVAL;
+       }
 
        if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
                struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
 
                filter = kmalloc(sizeof(*filter), GFP_KERNEL);
-               if (filter == NULL)
+               if (filter == NULL) {
+                       mutex_unlock(&pfk->dump_lock);
                        return -ENOMEM;
+               }
 
                memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
                       sizeof(xfrm_address_t));
@@ -1821,6 +1850,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
        pfk->dump.dump = pfkey_dump_sa;
        pfk->dump.done = pfkey_dump_sa_done;
        xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
+       mutex_unlock(&pfk->dump_lock);
 
        return pfkey_do_dump(pfk);
 }
@@ -1913,19 +1943,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 
        /* addresses present only in tunnel mode */
        if (t->mode == XFRM_MODE_TUNNEL) {
-               u8 *sa = (u8 *) (rq + 1);
-               int family, socklen;
+               int err;
 
-               family = pfkey_sockaddr_extract((struct sockaddr *)sa,
-                                               &t->saddr);
-               if (!family)
-                       return -EINVAL;
-
-               socklen = pfkey_sockaddr_len(family);
-               if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
-                                          &t->id.daddr) != family)
-                       return -EINVAL;
-               t->encap_family = family;
+               err = parse_sockaddr_pair(
+                       (struct sockaddr *)(rq + 1),
+                       rq->sadb_x_ipsecrequest_len - sizeof(*rq),
+                       &t->saddr, &t->id.daddr, &t->encap_family);
+               if (err)
+                       return err;
        } else
                t->encap_family = xp->family;
 
@@ -1945,7 +1970,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
        if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
                return -EINVAL;
 
-       while (len >= sizeof(struct sadb_x_ipsecrequest)) {
+       while (len >= sizeof(*rq)) {
+               if (len < rq->sadb_x_ipsecrequest_len ||
+                   rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+                       return -EINVAL;
+
                if ((err = parse_ipsecrequest(xp, rq)) < 0)
                        return err;
                len -= rq->sadb_x_ipsecrequest_len;
@@ -2408,7 +2437,6 @@ out:
        return err;
 }
 
-#ifdef CONFIG_NET_KEY_MIGRATE
 static int pfkey_sockaddr_pair_size(sa_family_t family)
 {
        return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2420,7 +2448,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
 {
        int af, socklen;
 
-       if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
+       if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
                return -EINVAL;
 
        af = pfkey_sockaddr_extract(sa, saddr);
@@ -2436,6 +2464,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
        return 0;
 }
 
+#ifdef CONFIG_NET_KEY_MIGRATE
 static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
                                    struct xfrm_migrate *m)
 {
@@ -2443,13 +2472,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
        struct sadb_x_ipsecrequest *rq2;
        int mode;
 
-       if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-           len < rq1->sadb_x_ipsecrequest_len)
+       if (len < sizeof(*rq1) ||
+           len < rq1->sadb_x_ipsecrequest_len ||
+           rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
                return -EINVAL;
 
        /* old endpoints */
        err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
-                                 rq1->sadb_x_ipsecrequest_len,
+                                 rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
                                  &m->old_saddr, &m->old_daddr,
                                  &m->old_family);
        if (err)
@@ -2458,13 +2488,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
        rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
        len -= rq1->sadb_x_ipsecrequest_len;
 
-       if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-           len < rq2->sadb_x_ipsecrequest_len)
+       if (len <= sizeof(*rq2) ||
+           len < rq2->sadb_x_ipsecrequest_len ||
+           rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
                return -EINVAL;
 
        /* new endpoints */
        err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
-                                 rq2->sadb_x_ipsecrequest_len,
+                                 rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
                                  &m->new_saddr, &m->new_daddr,
                                  &m->new_family);
        if (err)
@@ -2679,14 +2710,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
 {
        struct pfkey_sock *pfk = pfkey_sk(sk);
 
-       if (pfk->dump.dump != NULL)
+       mutex_lock(&pfk->dump_lock);
+       if (pfk->dump.dump != NULL) {
+               mutex_unlock(&pfk->dump_lock);
                return -EBUSY;
+       }
 
        pfk->dump.msg_version = hdr->sadb_msg_version;
        pfk->dump.msg_portid = hdr->sadb_msg_pid;
        pfk->dump.dump = pfkey_dump_sp;
        pfk->dump.done = pfkey_dump_sp_done;
        xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
+       mutex_unlock(&pfk->dump_lock);
 
        return pfkey_do_dump(pfk);
 }
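dump_lock closes a race on the per-socket dump state: two SADB_DUMP/SADB_X_SPDDUMP requests could both see pfk->dump.dump == NULL, register two walkers over the same dump.u union, and a concurrent pfkey_do_dump() could run against state that pfkey_terminate_dump() was tearing down. The guarded shape, with hypothetical callback names:

    mutex_lock(&pfk->dump_lock);
    if (pfk->dump.dump) {               /* a dump is already in flight */
            mutex_unlock(&pfk->dump_lock);
            return -EBUSY;
    }
    pfk->dump.dump = my_dump_cb;        /* hypothetical callbacks */
    pfk->dump.done = my_done_cb;
    my_walk_init(&pfk->dump.u);         /* register walker under the lock */
    mutex_unlock(&pfk->dump_lock);

    return pfkey_do_dump(pfk);          /* re-takes dump_lock itself */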
index 861b255a2d5195ac4155de919154d993bdaddf92..32ea0f3d868c6459c4194732ffcc14d8a20ec8bc 100644 (file)
@@ -1383,8 +1383,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
        } else
                err = pppol2tp_session_setsockopt(sk, session, optname, val);
 
-       err = 0;
-
 end_put_sess:
        sock_put(sk);
 end:
@@ -1507,8 +1505,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
 
                err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
                sock_put(ps->tunnel_sock);
-       } else
+               if (err)
+                       goto end_put_sess;
+       } else {
                err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+               if (err)
+                       goto end_put_sess;
+       }
 
        err = -EFAULT;
        if (put_user(len, optlen))
index e48724a6725e3266c1d5559d268339a7d2cd7f10..4d7543d1a62cce8d70e2d6894ffb86920c80c241 100644 (file)
@@ -208,6 +208,51 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
        return len;
 }
 
+static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
+                                        struct sk_buff *skb,
+                                        int rtap_vendor_space)
+{
+       struct {
+               struct ieee80211_hdr_3addr hdr;
+               u8 category;
+               u8 action_code;
+       } __packed action;
+
+       if (!sdata)
+               return;
+
+       BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
+
+       if (skb->len < rtap_vendor_space + sizeof(action) +
+                      VHT_MUMIMO_GROUPS_DATA_LEN)
+               return;
+
+       if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
+               return;
+
+       skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
+
+       if (!ieee80211_is_action(action.hdr.frame_control))
+               return;
+
+       if (action.category != WLAN_CATEGORY_VHT)
+               return;
+
+       if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
+               return;
+
+       if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
+               return;
+
+       skb = skb_copy(skb, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+       skb_queue_tail(&sdata->skb_queue, skb);
+       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
 /*
  * ieee80211_add_rx_radiotap_header - add radiotap header
  *
@@ -515,7 +560,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
        struct net_device *prev_dev = NULL;
        int present_fcs_len = 0;
        unsigned int rtap_vendor_space = 0;
-       struct ieee80211_mgmt *mgmt;
        struct ieee80211_sub_if_data *monitor_sdata =
                rcu_dereference(local->monitor_sdata);
 
@@ -553,6 +597,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                return remove_monitor_info(local, origskb, rtap_vendor_space);
        }
 
+       ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
+
        /* room for the radiotap header based on driver features */
        rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
        needed_headroom = rt_hdrlen - rtap_vendor_space;
@@ -618,23 +664,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                ieee80211_rx_stats(sdata->dev, skb->len);
        }
 
-       mgmt = (void *)skb->data;
-       if (monitor_sdata &&
-           skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
-           ieee80211_is_action(mgmt->frame_control) &&
-           mgmt->u.action.category == WLAN_CATEGORY_VHT &&
-           mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
-           is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
-           ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
-               struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
-
-               if (mu_skb) {
-                       mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
-                       skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
-                       ieee80211_queue_work(&local->hw, &monitor_sdata->work);
-               }
-       }
-
        if (prev_dev) {
                skb->dev = prev_dev;
                netif_receive_skb(skb);
@@ -3610,6 +3639,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                            !ether_addr_equal(bssid, hdr->addr1))
                                return false;
                }
+
+               /*
+                * 802.11-2016 Table 9-26 says that for data frames, A1 must be
+                * the BSSID - we've checked that already but may have accepted
+                * the wildcard (ff:ff:ff:ff:ff:ff).
+                *
+                * It also says:
+                *      The BSSID of the Data frame is determined as follows:
+                *      a) If the STA is contained within an AP or is associated
+                *         with an AP, the BSSID is the address currently in use
+                *         by the STA contained in the AP.
+                *
+                * So we should not accept data frames with an address that's
+                * multicast.
+                *
+                * Accepting it also opens a security problem because stations
+                * could encrypt it with the GTK and inject traffic that way.
+                */
+               if (ieee80211_is_data(hdr->frame_control) && multicast)
+                       return false;
+
                return true;
        case NL80211_IFTYPE_WDS:
                if (bssid || !ieee80211_is_data(hdr->frame_control))
index 4b2e1fb28bb438d695715fc492f52bf7809ade5d..d80073037856db9081e57b7c88e482a61b94a45f 100644 (file)
@@ -57,7 +57,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;
 
-       hlist_del(&exp->lnode);
+       hlist_del_rcu(&exp->lnode);
        master_help->expecting[exp->class]--;
 
        nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
@@ -363,7 +363,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        /* two references : one for hash insert, one for the timer */
        atomic_add(2, &exp->use);
 
-       hlist_add_head(&exp->lnode, &master_help->expectations);
+       hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;
 
        hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
index 6dc44d9b41900bea12f487e5a044259e92a47f7e..4eeb3418366ad5473945395346b6e4e5fc213fbc 100644 (file)
@@ -158,16 +158,25 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 {
        struct nf_conntrack_helper *h;
 
+       rcu_read_lock();
+
        h = __nf_conntrack_helper_find(name, l3num, protonum);
 #ifdef CONFIG_MODULES
        if (h == NULL) {
-               if (request_module("nfct-helper-%s", name) == 0)
+               rcu_read_unlock();
+               if (request_module("nfct-helper-%s", name) == 0) {
+                       rcu_read_lock();
                        h = __nf_conntrack_helper_find(name, l3num, protonum);
+               } else {
+                       return h;
+               }
        }
 #endif
        if (h != NULL && !try_module_get(h->me))
                h = NULL;
 
+       rcu_read_unlock();
+
        return h;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
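The helper table is searched under RCU, but request_module() can sleep, which is forbidden inside an RCU read-side section; hence the unlock around the module load and the fresh lookup once it returns. try_module_get() must also run before the read lock is dropped, or the helper's module could vanish between lookup and use. Condensed:

    rcu_read_lock();
    h = __nf_conntrack_helper_find(name, l3num, protonum);
    if (!h) {
            rcu_read_unlock();          /* request_module() may sleep */
            if (request_module("nfct-helper-%s", name) != 0)
                    return NULL;
            rcu_read_lock();            /* retry after the module loaded */
            h = __nf_conntrack_helper_find(name, l3num, protonum);
    }
    if (h && !try_module_get(h->me))    /* pin before leaving RCU */
            h = NULL;
    rcu_read_unlock();
    return h;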
@@ -311,38 +320,36 @@ void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
 
+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_name(const char *name)
 {
        struct nf_ct_helper_expectfn *cur;
        bool found = false;
 
-       rcu_read_lock();
        list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
                if (!strcmp(cur->name, name)) {
                        found = true;
                        break;
                }
        }
-       rcu_read_unlock();
        return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
 
+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
 {
        struct nf_ct_helper_expectfn *cur;
        bool found = false;
 
-       rcu_read_lock();
        list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
                if (cur->expectfn == symbol) {
                        found = true;
                        break;
                }
        }
-       rcu_read_unlock();
        return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
index 908d858034e4f413b13d9965f41c94efd331489a..dc7dfd68fafe5d8db341488ac7d9ad5bf8f80b46 100644 (file)
@@ -1488,11 +1488,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
                 * treat the second attempt as a no-op instead of returning
                 * an error.
                 */
-               if (help && help->helper &&
-                   !strcmp(help->helper->name, helpname))
-                       return 0;
-               else
-                       return -EBUSY;
+               err = -EBUSY;
+               if (help) {
+                       rcu_read_lock();
+                       helper = rcu_dereference(help->helper);
+                       if (helper && !strcmp(helper->name, helpname))
+                               err = 0;
+                       rcu_read_unlock();
+               }
+
+               return err;
        }
 
        if (!strcmp(helpname, "")) {
@@ -1929,9 +1934,9 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
 
                        err = 0;
                        if (test_bit(IPS_EXPECTED_BIT, &ct->status))
-                               events = IPCT_RELATED;
+                               events = 1 << IPCT_RELATED;
                        else
-                               events = IPCT_NEW;
+                               events = 1 << IPCT_NEW;
 
                        if (cda[CTA_LABELS] &&
                            ctnetlink_attach_labels(ct, cda) == 0)
@@ -2675,8 +2680,8 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        last = (struct nf_conntrack_expect *)cb->args[1];
        for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
-               hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]],
-                                    hnode) {
+               hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
+                                        hnode) {
                        if (l3proto && exp->tuple.src.l3num != l3proto)
                                continue;
 
@@ -2727,7 +2732,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
        last = (struct nf_conntrack_expect *)cb->args[1];
 restart:
-       hlist_for_each_entry(exp, &help->expectations, lnode) {
+       hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
                if (l3proto && exp->tuple.src.l3num != l3proto)
                        continue;
                if (cb->args[1]) {
@@ -2789,6 +2794,12 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
                return -ENOENT;
 
        ct = nf_ct_tuplehash_to_ctrack(h);
+       /* No expectation linked to this conntrack entry. */
+       if (!nfct_help(ct)) {
+               nf_ct_put(ct);
+               return 0;
+       }
+
        c.data = ct;
 
        err = netlink_dump_start(ctnl, skb, nlh, &c);
@@ -3133,23 +3144,27 @@ ctnetlink_create_expect(struct net *net,
                return -ENOENT;
        ct = nf_ct_tuplehash_to_ctrack(h);
 
+       rcu_read_lock();
        if (cda[CTA_EXPECT_HELP_NAME]) {
                const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
 
                helper = __nf_conntrack_helper_find(helpname, u3,
                                                    nf_ct_protonum(ct));
                if (helper == NULL) {
+                       rcu_read_unlock();
 #ifdef CONFIG_MODULES
                        if (request_module("nfct-helper-%s", helpname) < 0) {
                                err = -EOPNOTSUPP;
                                goto err_ct;
                        }
+                       rcu_read_lock();
                        helper = __nf_conntrack_helper_find(helpname, u3,
                                                            nf_ct_protonum(ct));
                        if (helper) {
                                err = -EAGAIN;
-                               goto err_ct;
+                               goto err_rcu;
                        }
+                       rcu_read_unlock();
 #endif
                        err = -EOPNOTSUPP;
                        goto err_ct;
@@ -3159,11 +3174,13 @@ ctnetlink_create_expect(struct net *net,
        exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
        if (IS_ERR(exp)) {
                err = PTR_ERR(exp);
-               goto err_ct;
+               goto err_rcu;
        }
 
        err = nf_ct_expect_related_report(exp, portid, report);
        nf_ct_expect_put(exp);
+err_rcu:
+       rcu_read_unlock();
 err_ct:
        nf_ct_put(ct);
        return err;
index d43869879fcfcea6ca23e3a579bd21f8aa855b10..86067560a3184f26c521e35b8f63e4952c95955e 100644 (file)
@@ -101,11 +101,13 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
                rcu_read_lock();
                idev = __in6_dev_get(skb->dev);
                if (idev != NULL) {
+                       read_lock_bh(&idev->lock);
                        list_for_each_entry(ifa, &idev->addr_list, if_list) {
                                newdst = ifa->addr;
                                addr = true;
                                break;
                        }
+                       read_unlock_bh(&idev->lock);
                }
                rcu_read_unlock();
 
index eb2721af898dbb54ab099f4878d7b2673cbb9522..c4dad1254ead01818fb5b2c0474b26f74762fee2 100644 (file)
@@ -21,6 +21,7 @@ struct nft_hash {
        enum nft_registers      sreg:8;
        enum nft_registers      dreg:8;
        u8                      len;
+       bool                    autogen_seed:1;
        u32                     modulus;
        u32                     seed;
        u32                     offset;
@@ -82,10 +83,12 @@ static int nft_hash_init(const struct nft_ctx *ctx,
        if (priv->offset + priv->modulus - 1 < priv->offset)
                return -EOVERFLOW;
 
-       if (tb[NFTA_HASH_SEED])
+       if (tb[NFTA_HASH_SEED]) {
                priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
-       else
+       } else {
+               priv->autogen_seed = true;
                get_random_bytes(&priv->seed, sizeof(priv->seed));
+       }
 
        return nft_validate_register_load(priv->sreg, len) &&
               nft_validate_register_store(ctx, priv->dreg, NULL,
@@ -105,7 +108,8 @@ static int nft_hash_dump(struct sk_buff *skb,
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
                goto nla_put_failure;
-       if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
+       if (!priv->autogen_seed &&
+           nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
                goto nla_put_failure;
        if (priv->offset != 0)
                if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
index 27241a767f17b4b27d24095a31e5e9a2d3e29ce4..c64aca611ac5c5f81ad7c925652bbb90554763ac 100644 (file)
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
        tcp_hdrlen = tcph->doff * 4;
 
-       if (len < tcp_hdrlen)
+       if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
                return -1;
 
        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        if (len > tcp_hdrlen)
                return 0;
 
+       /* tcph->doff has 4 bits, do not wrap it to 0 */
+       if (tcp_hdrlen >= 15 * 4)
+               return 0;
+
        /*
         * MSS Option not found ?! add it..
         */
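The doff guard exists because tcph->doff is a 4-bit count of 32-bit words: 15 * 4 = 60 bytes is the largest encodable TCP header. Appending the 4-byte MSS option to a maximal header would increment doff to 16, which wraps to 0 in the bitfield. A user-space illustration of the wrap:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t doff = 15;           /* 60-byte header, the maximum */
            doff = (doff + 1) & 0xf;     /* adding one 4-byte option */
            printf("doff = %u words (%u bytes)\n", doff, doff * 4);
            return 0;                    /* prints: doff = 0 words (0 bytes) */
    }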
index 80cb7babeb6427d5768f9e636d5d9633d46f8413..df7f1df0033090c0cd76f3afda43e5159a509791 100644 (file)
@@ -393,7 +393,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
 
        rcu_read_lock();
        indev = __in6_dev_get(skb->dev);
-       if (indev)
+       if (indev) {
+               read_lock_bh(&indev->lock);
                list_for_each_entry(ifa, &indev->addr_list, if_list) {
                        if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
                                continue;
@@ -401,6 +402,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
                        laddr = &ifa->addr;
                        break;
                }
+               read_unlock_bh(&indev->lock);
+       }
        rcu_read_unlock();
 
        return laddr ? laddr : daddr;
index 8489beff5c25c971067f38833ed4a790a98dd86e..ea81ccf3c7d6a53095b4922329c25b566d5b5940 100644 (file)
@@ -3836,6 +3836,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        case PACKET_HDRLEN:
                if (len > sizeof(int))
                        len = sizeof(int);
+               if (len < sizeof(int))
+                       return -EINVAL;
                if (copy_from_user(&val, optval, len))
                        return -EFAULT;
                switch (val) {
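The new lower bound matters because val is an uninitialized stack variable: a 1- to 3-byte copy_from_user() would define only part of it, and the switch below would dispatch on leftover stack bytes. A user-space sketch of the hazard (hypothetical, little-endian assumed):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            int val = 0x7f7f7f7f;        /* stand-in for stale stack bytes */
            char user_byte = 1;          /* what a short copy provides */

            memcpy(&val, &user_byte, 1); /* partial copy: 3 bytes untouched */
            printf("0x%x\n", val);       /* 0x7f7f7f01, not 1 */
            return 0;
    }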
index ae5ac175b2bef96ffa614bc799db5cd90a7bdc08..9da7368b0140f9e4a96794e21d66f487881987ba 100644 (file)
@@ -658,7 +658,9 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        }
 
        if (plen != len) {
-               skb_pad(skb, plen - len);
+               rc = skb_pad(skb, plen - len);
+               if (rc)
+                       goto out_node;
                skb_put(skb, plen - len);
        }
 
index b70aa57319ea3233395dc7ae349b8b7aab5dfd03..e05b924618a03e6a58655873700ee66e0688b7ad 100644 (file)
@@ -529,20 +529,20 @@ errout:
        return err;
 }
 
-static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb)
+static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
 {
-       a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL);
-       if (!a->act_cookie)
-               return -ENOMEM;
+       struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return NULL;
 
-       a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
-       if (!a->act_cookie->data) {
-               kfree(a->act_cookie);
-               return -ENOMEM;
+       c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
+       if (!c->data) {
+               kfree(c);
+               return NULL;
        }
-       a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]);
+       c->len = nla_len(tb[TCA_ACT_COOKIE]);
 
-       return 0;
+       return c;
 }
 
 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
@@ -551,6 +551,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 {
        struct tc_action *a;
        struct tc_action_ops *a_o;
+       struct tc_cookie *cookie = NULL;
        char act_name[IFNAMSIZ];
        struct nlattr *tb[TCA_ACT_MAX + 1];
        struct nlattr *kind;
@@ -566,6 +567,18 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
                        goto err_out;
                if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
                        goto err_out;
+               if (tb[TCA_ACT_COOKIE]) {
+                       int cklen = nla_len(tb[TCA_ACT_COOKIE]);
+
+                       if (cklen > TC_COOKIE_MAX_SIZE)
+                               goto err_out;
+
+                       cookie = nla_memdup_cookie(tb);
+                       if (!cookie) {
+                               err = -ENOMEM;
+                               goto err_out;
+                       }
+               }
        } else {
                err = -EINVAL;
                if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
@@ -604,20 +617,12 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
        if (err < 0)
                goto err_mod;
 
-       if (tb[TCA_ACT_COOKIE]) {
-               int cklen = nla_len(tb[TCA_ACT_COOKIE]);
-
-               if (cklen > TC_COOKIE_MAX_SIZE) {
-                       err = -EINVAL;
-                       tcf_hash_release(a, bind);
-                       goto err_mod;
-               }
-
-               if (nla_memdup_cookie(a, tb) < 0) {
-                       err = -ENOMEM;
-                       tcf_hash_release(a, bind);
-                       goto err_mod;
+       if (name == NULL && tb[TCA_ACT_COOKIE]) {
+               if (a->act_cookie) {
+                       kfree(a->act_cookie->data);
+                       kfree(a->act_cookie);
                }
+               a->act_cookie = cookie;
        }
 
        /* module count goes up only when brand new policy is created
@@ -632,6 +637,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 err_mod:
        module_put(a_o->owner);
 err_out:
+       if (cookie) {
+               kfree(cookie->data);
+               kfree(cookie);
+       }
        return ERR_PTR(err);
 }
 
index b052b27a984e39c244c94132f1162a7033e5cc63..1a2f9e964330a5cd0c0b9a5cac91807221b0ffd9 100644 (file)
@@ -794,7 +794,7 @@ static void attach_default_qdiscs(struct net_device *dev)
                }
        }
 #ifdef CONFIG_NET_SCHED
-       if (dev->qdisc)
+       if (dev->qdisc != &noop_qdisc)
                qdisc_hash_add(dev->qdisc);
 #endif
 }
index c1401f43d40fc5c5a85dbcbcba424ad2e649ab56..d9d4c92e06b312e6c300afd8f3d5db33161fd9f7 100644 (file)
@@ -7034,6 +7034,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
        if (sock->state != SS_UNCONNECTED)
                goto out;
 
+       if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+               goto out;
+
        /* If backlog is zero, disable listening. */
        if (!backlog) {
                if (sctp_sstate(sk, CLOSED))
index 7130e73bd42c21758e88b24b875da4bd97b3c4d2..bdce99f9407affaae8ef524d3bd65bca5847d62b 100644 (file)
@@ -866,6 +866,14 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
        if (!tsk_peer_msg(tsk, hdr))
                goto exit;
 
+       if (unlikely(msg_errcode(hdr))) {
+               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+               tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
+                                     tsk_peer_port(tsk));
+               sk->sk_state_change(sk);
+               goto exit;
+       }
+
        tsk->probe_unacked = false;
 
        if (mtyp == CONN_PROBE) {
@@ -1083,7 +1091,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
                }
        } while (sent < dlen && !rc);
 
-       return rc ? rc : sent;
+       return sent ? sent : rc;
 }
 
 /**
@@ -1259,7 +1267,10 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
        struct sock *sk = sock->sk;
        DEFINE_WAIT(wait);
        long timeo = *timeop;
-       int err;
+       int err = sock_error(sk);
+
+       if (err)
+               return err;
 
        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
@@ -1281,6 +1292,10 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;
+
+               err = sock_error(sk);
+               if (err)
+                       break;
        }
        finish_wait(sk_sleep(sk), &wait);
        *timeop = timeo;
@@ -1484,7 +1499,7 @@ restart:
        if (unlikely(flags & MSG_PEEK))
                goto exit;
 
-       tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
+       tsk->rcv_unacked += tsk_inc(tsk, hlen + msg_data_sz(msg));
        if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
                tipc_sk_send_ack(tsk);
        tsk_advance_rx_queue(sk);
@@ -1551,6 +1566,8 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct tipc_msg *hdr = buf_msg(skb);
+       u32 pport = msg_origport(hdr);
+       u32 pnode = msg_orignode(hdr);
 
        if (unlikely(msg_mcast(hdr)))
                return false;
@@ -1558,18 +1575,28 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
        switch (sk->sk_state) {
        case TIPC_CONNECTING:
                /* Accept only ACK or NACK message */
-               if (unlikely(!msg_connected(hdr)))
-                       return false;
+               if (unlikely(!msg_connected(hdr))) {
+                       if (pport != tsk_peer_port(tsk) ||
+                           pnode != tsk_peer_node(tsk))
+                               return false;
+
+                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+                       sk->sk_err = ECONNREFUSED;
+                       sk->sk_state_change(sk);
+                       return true;
+               }
 
                if (unlikely(msg_errcode(hdr))) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        sk->sk_err = ECONNREFUSED;
+                       sk->sk_state_change(sk);
                        return true;
                }
 
                if (unlikely(!msg_isdata(hdr))) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        sk->sk_err = EINVAL;
+                       sk->sk_state_change(sk);
                        return true;
                }
 
@@ -1581,8 +1608,7 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
                        return true;
 
                /* If empty 'ACK-' message, wake up sleeping connect() */
-               if (waitqueue_active(sk_sleep(sk)))
-                       wake_up_interruptible(sk_sleep(sk));
+               sk->sk_data_ready(sk);
 
                /* 'ACK-' message is neither accepted nor rejected: */
                msg_set_dest_droppable(hdr, 1);
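
Every error path touched in filter_connect() gains an sk->sk_state_change(sk) call: storing sk->sk_err alone is only seen by the next syscall, while the callback wakes tasks that are already asleep in connect() or poll(). Reduced to the pattern (tipc_abort_connect() is a hypothetical name used for illustration):

    static void tipc_abort_connect(struct sock *sk, int err)
    {
            tipc_set_sk_state(sk, TIPC_DISCONNECTING);
            sk->sk_err = err;               /* e.g. ECONNREFUSED or EINVAL */
            sk->sk_state_change(sk);        /* wake sleepers in connect()/poll() */
    }
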
index 46bdb4fbed0bb34a5d6ae40991b3fda6e5dff82c..e23570b647ae721516997130d7abebeaf3f8bb03 100644 (file)
@@ -395,7 +395,7 @@ resume:
                if (xo)
                        xfrm_gro = xo->flags & XFRM_GRO;
 
-               err = x->inner_mode->afinfo->transport_finish(skb, async);
+               err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
                if (xfrm_gro) {
                        skb_dst_drop(skb);
                        gro_cells_receive(&gro_cells, skb);
index 236cbbc0ab9cfff05cd027ffb0dc56aa15e61033..dfc77b9c5e5a8dd2b31440be47fb4480513680e1 100644 (file)
@@ -1006,6 +1006,10 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
                err = -ESRCH;
 out:
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+
+       if (cnt)
+               xfrm_garbage_collect(net);
+
        return err;
 }
 EXPORT_SYMBOL(xfrm_policy_flush);
index addf060399e09547307d9c023f36d8dbf869a931..9cb4fe4478a137e7bb3352ae958830657f6b5b7c 100644 (file)
@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
  * immediately unlinked.
  */
 struct key_type key_type_dead = {
-       .name = "dead",
+       .name = ".dead",
 };
 
 /*
index 52c34532c78562643fce84832a5b536baf84988b..4ad3212adebe8becc152f22d2448f8db4716146b 100644 (file)
@@ -273,7 +273,8 @@ error:
  * Create and join an anonymous session keyring or join a named session
  * keyring, creating it if necessary.  A named session keyring must have Search
  * permission for it to be joined.  Session keyrings without this permit will
- * be skipped over.
+ * be skipped over.  It is not permitted for userspace to create or join
+ * keyrings whose name begins with a dot.
  *
  * If successful, the ID of the joined session keyring will be returned.
  */
@@ -290,12 +291,16 @@ long keyctl_join_session_keyring(const char __user *_name)
                        ret = PTR_ERR(name);
                        goto error;
                }
+
+               ret = -EPERM;
+               if (name[0] == '.')
+                       goto error_name;
        }
 
        /* join the session */
        ret = join_session_keyring(name);
+error_name:
        kfree(name);
-
 error:
        return ret;
 }
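
From user space, the new check means KEYCTL_JOIN_SESSION_KEYRING now fails with EPERM for any name starting with a dot, reserving that namespace (including the ".dead" key type renamed above) for the kernel. A small compilable illustration via the raw syscall (the keyring name is made up):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define KEYCTL_JOIN_SESSION_KEYRING 1   /* from <linux/keyctl.h> */

    int main(void)
    {
            long id = syscall(SYS_keyctl, KEYCTL_JOIN_SESSION_KEYRING, ".builtin");

            if (id == -1 && errno == EPERM)
                    printf("dot-prefixed keyring name rejected\n");
            return 0;
    }
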
@@ -1253,8 +1258,8 @@ error:
  * Read or set the default keyring in which request_key() will cache keys and
  * return the old setting.
  *
- * If a process keyring is specified then this will be created if it doesn't
- * yet exist.  The old setting will be returned if successful.
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist.  The old setting will be returned if successful.
  */
 long keyctl_set_reqkey_keyring(int reqkey_defl)
 {
@@ -1279,11 +1284,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
 
        case KEY_REQKEY_DEFL_PROCESS_KEYRING:
                ret = install_process_keyring_to_cred(new);
-               if (ret < 0) {
-                       if (ret != -EEXIST)
-                               goto error;
-                       ret = 0;
-               }
+               if (ret < 0)
+                       goto error;
                goto set;
 
        case KEY_REQKEY_DEFL_DEFAULT:
index b6fdd22205b169b663cdb00aecd5d214c7a376dd..9139b18fc863eb36d62d7777f1553dda63236dfe 100644 (file)
@@ -128,13 +128,18 @@ error:
 }
 
 /*
- * Install a fresh thread keyring directly to new credentials.  This keyring is
- * allowed to overrun the quota.
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
  */
 int install_thread_keyring_to_cred(struct cred *new)
 {
        struct key *keyring;
 
+       if (new->thread_keyring)
+               return 0;
+
        keyring = keyring_alloc("_tid", new->uid, new->gid, new,
                                KEY_POS_ALL | KEY_USR_VIEW,
                                KEY_ALLOC_QUOTA_OVERRUN,
@@ -147,7 +152,9 @@ int install_thread_keyring_to_cred(struct cred *new)
 }
 
 /*
- * Install a fresh thread keyring, discarding the old one.
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
  */
 static int install_thread_keyring(void)
 {
@@ -158,8 +165,6 @@ static int install_thread_keyring(void)
        if (!new)
                return -ENOMEM;
 
-       BUG_ON(new->thread_keyring);
-
        ret = install_thread_keyring_to_cred(new);
        if (ret < 0) {
                abort_creds(new);
@@ -170,17 +175,17 @@ static int install_thread_keyring(void)
 }
 
 /*
- * Install a process keyring directly to a credentials struct.
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
  *
- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other value on any other error
+ * Return: 0 if a process keyring is now present; -errno on failure.
  */
 int install_process_keyring_to_cred(struct cred *new)
 {
        struct key *keyring;
 
        if (new->process_keyring)
-               return -EEXIST;
+               return 0;
 
        keyring = keyring_alloc("_pid", new->uid, new->gid, new,
                                KEY_POS_ALL | KEY_USR_VIEW,
@@ -194,11 +199,9 @@ int install_process_keyring_to_cred(struct cred *new)
 }
 
 /*
- * Make sure a process keyring is installed for the current process.  The
- * existing process keyring is not replaced.
+ * Install a process keyring to the current task if it didn't have one already.
  *
- * Returns 0 if there is a process keyring by the end of this function, some
- * error otherwise.
+ * Return: 0 if a process keyring is now present; -errno on failure.
  */
 static int install_process_keyring(void)
 {
@@ -212,14 +215,18 @@ static int install_process_keyring(void)
        ret = install_process_keyring_to_cred(new);
        if (ret < 0) {
                abort_creds(new);
-               return ret != -EEXIST ? ret : 0;
+               return ret;
        }
 
        return commit_creds(new);
 }
 
 /*
- * Install a session keyring directly to a credentials struct.
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any.  If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
  */
 int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 {
@@ -254,8 +261,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 }
 
 /*
- * Install a session keyring, discarding the old one.  If a keyring is not
- * supplied, an empty one is invented.
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any.  If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
  */
 static int install_session_keyring(struct key *keyring)
 {
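
The common thread in the process_keys.c hunks is idempotency: rather than failing with -EEXIST and forcing every caller to filter that out (as the deleted branches in keyctl.c above had to), the install helpers now return 0 when the keyring is already present. The pattern, as a sketch (slot stands in for new->thread_keyring or new->process_keyring; the trailing keyring_alloc() arguments stay elided, as in the hunks):

    int install_keyring_to_cred(struct cred *new, struct key **slot,
                                const char *desc)
    {
            struct key *keyring;

            if (*slot)
                    return 0;       /* already present: success, not -EEXIST */

            keyring = keyring_alloc(desc, new->uid, new->gid, new,
                                    KEY_POS_ALL | KEY_USR_VIEW,
                                    KEY_ALLOC_QUOTA_OVERRUN, ...);
            if (IS_ERR(keyring))
                    return PTR_ERR(keyring);

            *slot = keyring;
            return 0;
    }
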
index 3b693e924db745c0ec8be74171bb189f17dfc53d..12ba83367b1bc882f6d6fbab9329185d58125090 100644 (file)
 /* wait until all locks are released */
 void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
 {
-       int max_count = 5 * HZ;
+       int warn_count = 5 * HZ;
 
        if (atomic_read(lockp) < 0) {
                pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
                return;
        }
        while (atomic_read(lockp) > 0) {
-               if (max_count == 0) {
-                       pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
-                       break;
-               }
+               if (warn_count-- == 0)
+                       pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
                schedule_timeout_uninterruptible(1);
-               max_count--;
        }
 }
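
The seq_lock rewrite changes the failure mode rather than the happy path: the old loop gave up after 5*HZ ticks and returned while the use count was still raised, which let the caller free an object that another context was still using; the new loop waits for as long as it takes and merely warns once the grace period elapses. Condensed contrast (sketch):

    /* old: bail out after ~5s, risking a use-after-free in the caller */
    if (max_count-- == 0) {
            pr_warn("timeout\n");
            break;
    }

    /* new: warn once, but keep waiting until the count really drops */
    if (warn_count-- == 0)
            pr_warn("still waiting\n");
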
 
index f6769312ebfccbe473ac291e81b386b48b45488f..c3768cd494a5f3fe3bb9059bd8e588c73b806c86 100644 (file)
@@ -45,7 +45,7 @@ struct snd_fw_async_midi_port {
 
        struct snd_rawmidi_substream *substream;
        snd_fw_async_midi_port_fill fill;
-       unsigned int consume_bytes;
+       int consume_bytes;
 };
 
 int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
index 74d7fb6efce6ca8258ece95a83c3460588a3e99e..413ab6313bb66515c284734ca92c87bef014c5fd 100644 (file)
@@ -227,11 +227,11 @@ static void do_registration(struct work_struct *work)
        if (err < 0)
                goto error;
 
-       err = detect_quirks(oxfw);
+       err = snd_oxfw_stream_discover(oxfw);
        if (err < 0)
                goto error;
 
-       err = snd_oxfw_stream_discover(oxfw);
+       err = detect_quirks(oxfw);
        if (err < 0)
                goto error;
 
index 5c7219fb3aa86738a49cff98bf6f16693f7a0192..9e2a3404a836bf919f68b6cbd33c8f569b3c8843 100644 (file)
@@ -621,7 +621,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
                .codec_dai_name = "snd-soc-dummy-dai",
                .codec_name = "snd-soc-dummy",
                .platform_name = "sst-mfld-platform",
-               .ignore_suspend = 1,
+               .nonatomic = true,
                .dynamic = 1,
                .dpcm_playback = 1,
                .dpcm_capture = 1,
@@ -634,7 +634,6 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
                .codec_dai_name = "snd-soc-dummy-dai",
                .codec_name = "snd-soc-dummy",
                .platform_name = "sst-mfld-platform",
-               .ignore_suspend = 1,
                .nonatomic = true,
                .dynamic = 1,
                .dpcm_playback = 1,
@@ -661,6 +660,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
                                                | SND_SOC_DAIFMT_CBS_CFS,
                .be_hw_params_fixup = byt_rt5640_codec_fixup,
                .ignore_suspend = 1,
+               .nonatomic = true,
                .dpcm_playback = 1,
                .dpcm_capture = 1,
                .init = byt_rt5640_init,
index 3186f015939fb5fce3e8393fd40463362464dbf8..8164bec63bf15b874da49a4406ae9f6124cd776c 100644 (file)
@@ -235,7 +235,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
                .codec_dai_name = "snd-soc-dummy-dai",
                .codec_name = "snd-soc-dummy",
                .platform_name = "sst-mfld-platform",
-               .ignore_suspend = 1,
                .nonatomic = true,
                .dynamic = 1,
                .dpcm_playback = 1,
@@ -249,7 +248,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
                .codec_dai_name = "snd-soc-dummy-dai",
                .codec_name = "snd-soc-dummy",
                .platform_name = "sst-mfld-platform",
-               .ignore_suspend = 1,
                .nonatomic = true,
                .dynamic = 1,
                .dpcm_playback = 1,
index 3e9b1c0bb1ce3cb1864e1825ade56720803df289..058bc99c6c3479e4d7a92c97a885f634b168105d 100644 (file)
@@ -933,6 +933,7 @@ static int soc_tplg_denum_create_texts(struct soc_enum *se,
                }
        }
 
+       se->texts = (const char * const *)se->dobj.control.dtexts;
        return 0;
 
 err:
index d487dd2ef016fee0f25d6a5705feb98a0f26356a..cfcb0ea9d99d8981abc2425d15f3e7148e0dc7a6 100644 (file)
@@ -1299,6 +1299,7 @@ struct uniperif {
        int ver; /* IP version, used by register access macros */
        struct regmap_field *clk_sel;
        struct regmap_field *valid_sel;
+       spinlock_t irq_lock; /* used to prevent races with the IRQ handler */
 
        /* capabilities */
        const struct snd_pcm_hardware *hw;
index 60ae31a303ab001e5724518248f59ef12033570b..d7e8dd46d2cc40ba2c937a4ea627f4859fe1f478 100644 (file)
@@ -65,10 +65,13 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
        unsigned int status;
        unsigned int tmp;
 
-       if (player->state == UNIPERIF_STATE_STOPPED) {
-               /* Unexpected IRQ: do nothing */
-               return IRQ_NONE;
-       }
+       spin_lock(&player->irq_lock);
+       if (!player->substream)
+               goto irq_spin_unlock;
+
+       snd_pcm_stream_lock(player->substream);
+       if (player->state == UNIPERIF_STATE_STOPPED)
+               goto stream_unlock;
 
        /* Get interrupt status & clear them immediately */
        status = GET_UNIPERIF_ITS(player);
@@ -88,9 +91,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
                        SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player);
 
                        /* Stop the player */
-                       snd_pcm_stream_lock(player->substream);
                        snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
-                       snd_pcm_stream_unlock(player->substream);
                }
 
                ret = IRQ_HANDLED;
@@ -104,9 +105,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
                SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
 
                /* Stop the player */
-               snd_pcm_stream_lock(player->substream);
                snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
-               snd_pcm_stream_unlock(player->substream);
 
                ret = IRQ_HANDLED;
        }
@@ -116,7 +115,8 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
                if (!player->underflow_enabled) {
                        dev_err(player->dev,
                                "unexpected Underflow recovering\n");
-                       return -EPERM;
+                       ret = -EPERM;
+                       goto stream_unlock;
                }
                /* Read the underflow recovery duration */
                tmp = GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(player);
@@ -138,13 +138,16 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
                dev_err(player->dev, "Underflow recovery failed\n");
 
                /* Stop the player */
-               snd_pcm_stream_lock(player->substream);
                snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
-               snd_pcm_stream_unlock(player->substream);
 
                ret = IRQ_HANDLED;
        }
 
+stream_unlock:
+       snd_pcm_stream_unlock(player->substream);
+irq_spin_unlock:
+       spin_unlock(&player->irq_lock);
+
        return ret;
 }
 
@@ -588,6 +591,7 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *player = priv->dai_data.uni;
        struct snd_aes_iec958 *iec958 =  &player->stream_settings.iec958;
+       unsigned long flags;
 
        mutex_lock(&player->ctrl_lock);
        iec958->status[0] = ucontrol->value.iec958.status[0];
@@ -596,12 +600,14 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
        iec958->status[3] = ucontrol->value.iec958.status[3];
        mutex_unlock(&player->ctrl_lock);
 
+       spin_lock_irqsave(&player->irq_lock, flags);
        if (player->substream && player->substream->runtime)
                uni_player_set_channel_status(player,
                                              player->substream->runtime);
        else
                uni_player_set_channel_status(player, NULL);
 
+       spin_unlock_irqrestore(&player->irq_lock, flags);
        return 0;
 }
 
@@ -686,9 +692,12 @@ static int uni_player_startup(struct snd_pcm_substream *substream,
 {
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *player = priv->dai_data.uni;
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&player->irq_lock, flags);
        player->substream = substream;
+       spin_unlock_irqrestore(&player->irq_lock, flags);
 
        player->clk_adj = 0;
 
@@ -986,12 +995,15 @@ static void uni_player_shutdown(struct snd_pcm_substream *substream,
 {
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *player = priv->dai_data.uni;
+       unsigned long flags;
 
+       spin_lock_irqsave(&player->irq_lock, flags);
        if (player->state != UNIPERIF_STATE_STOPPED)
                /* Stop the player */
                uni_player_stop(player);
 
        player->substream = NULL;
+       spin_unlock_irqrestore(&player->irq_lock, flags);
 }
 
 static int uni_player_parse_dt_audio_glue(struct platform_device *pdev,
@@ -1096,6 +1108,7 @@ int uni_player_init(struct platform_device *pdev,
        }
 
        mutex_init(&player->ctrl_lock);
+       spin_lock_init(&player->irq_lock);
 
        /* Ensure that disabled by default */
        SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
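
Taken together, the uni_player changes implement a single locking recipe: startup and shutdown publish or clear player->substream only under irq_lock, and the IRQ handler re-reads that pointer under the same lock before taking the PCM stream lock, so snd_pcm_stop() can no longer race against a substream that is being torn down. The handler's skeleton, assembled from the hunks above (sketch, error branches omitted):

    static irqreturn_t uni_irq_handler(int irq, void *dev_id)
    {
            struct uniperif *uni = dev_id;
            irqreturn_t ret = IRQ_NONE;

            spin_lock(&uni->irq_lock);              /* pairs with startup/shutdown */
            if (!uni->substream)
                    goto irq_unlock;                /* shutdown already ran */

            snd_pcm_stream_lock(uni->substream);    /* pointer can't vanish now */
            if (uni->state != UNIPERIF_STATE_STOPPED) {
                    snd_pcm_stop(uni->substream, SNDRV_PCM_STATE_XRUN);
                    ret = IRQ_HANDLED;
            }
            snd_pcm_stream_unlock(uni->substream);
    irq_unlock:
            spin_unlock(&uni->irq_lock);
            return ret;
    }
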
index 93a8df6ed880ea8cc32b62f94ab0b44befbe06ef..ee0055e608529195fda9b76a4226ebf752ead6e3 100644 (file)
@@ -46,10 +46,15 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
        struct uniperif *reader = dev_id;
        unsigned int status;
 
+       spin_lock(&reader->irq_lock);
+       if (!reader->substream)
+               goto irq_spin_unlock;
+
+       snd_pcm_stream_lock(reader->substream);
        if (reader->state == UNIPERIF_STATE_STOPPED) {
                /* Unexpected IRQ: do nothing */
                dev_warn(reader->dev, "unexpected IRQ\n");
-               return IRQ_HANDLED;
+               goto stream_unlock;
        }
 
        /* Get interrupt status & clear them immediately */
@@ -60,13 +65,16 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
        if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
                dev_err(reader->dev, "FIFO error detected\n");
 
-               snd_pcm_stream_lock(reader->substream);
                snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
-               snd_pcm_stream_unlock(reader->substream);
 
-               return IRQ_HANDLED;
+               ret = IRQ_HANDLED;
        }
 
+stream_unlock:
+       snd_pcm_stream_unlock(reader->substream);
+irq_spin_unlock:
+       spin_unlock(&reader->irq_lock);
+
        return ret;
 }
 
@@ -347,9 +355,12 @@ static int uni_reader_startup(struct snd_pcm_substream *substream,
 {
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *reader = priv->dai_data.uni;
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&reader->irq_lock, flags);
        reader->substream = substream;
+       spin_unlock_irqrestore(&reader->irq_lock, flags);
 
        if (!UNIPERIF_TYPE_IS_TDM(reader))
                return 0;
@@ -375,12 +386,15 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream,
 {
        struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
        struct uniperif *reader = priv->dai_data.uni;
+       unsigned long flags;
 
+       spin_lock_irqsave(&reader->irq_lock, flags);
        if (reader->state != UNIPERIF_STATE_STOPPED) {
                /* Stop the reader */
                uni_reader_stop(reader);
        }
        reader->substream = NULL;
+       spin_unlock_irqrestore(&reader->irq_lock, flags);
 }
 
 static const struct snd_soc_dai_ops uni_reader_dai_ops = {
@@ -415,6 +429,8 @@ int uni_reader_init(struct platform_device *pdev,
                return -EBUSY;
        }
 
+       spin_lock_init(&reader->irq_lock);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(uni_reader_init);
index 93b0aa74ca03bada7db24ae827b902ce09591bef..39c2c7d067bba55cae5cf19983128369af1bb81c 100644 (file)
@@ -156,6 +156,7 @@ out:
                                         */
                        case 0x2C:      /* Westmere EP - Gulftown */
                                cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+                               break;
                        case 0x2A:      /* SNB */
                        case 0x2D:      /* SNB Xeon */
                        case 0x3A:      /* IVB */
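
The added break is a classic fall-through fix: without it, model 0x2C (Westmere EP) set its turbo-ratio capability and then kept executing the Sandy Bridge group below, picking up SNB-specific bits the CPU does not have. In miniature (flag names are illustrative, not cpupower's):

    switch (model) {
    case 0x2C:                      /* Westmere EP */
            caps |= CAP_TURBO_RATIO;
            break;                  /* without this, execution falls into SNB */
    case 0x2A:                      /* SNB */
    case 0x2D:                      /* SNB Xeon */
            caps |= CAP_SNB_ONLY;   /* hypothetical SNB-group bit */
            break;
    }
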
index fedca32853262152cb7e1028139728c4aa677b11..ccf2a69365ccbb2a7cd27fa03089be54ce506c1a 100644 (file)
@@ -100,6 +100,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics
 \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.  These numbers are from hardware residency counters.
 \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
 \fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
+\fBGFXMHz\fP Instantaneous snapshot of the GPU frequency, as reported by sysfs at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
 \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states.  These numbers are from hardware residency counters.
 \fBPkgWatt\fP Watts consumed by the whole package.
 \fBCorWatt\fP Watts consumed by the core part of the package.
index 828dccd3f01eaf324bf2d3d2c674a585417cd07a..b11294730771bed6766f77138460813f8abbfce8 100644 (file)
@@ -1142,7 +1142,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
                 * it is possible for mperf's non-halted cycles + idle states
                 * to exceed TSC's all cycles: show c1 = 0% in that case.
                 */
-               if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+               if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
                        old->c1 = 0;
                else {
                        /* normal case, derive c1 */
@@ -2485,8 +2485,10 @@ int snapshot_gfx_mhz(void)
 
        if (fp == NULL)
                fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
-       else
+       else {
                rewind(fp);
+               fflush(fp);
+       }
 
        retval = fscanf(fp, "%d", &gfx_cur_mhz);
        if (retval != 1)
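
The two-line change to snapshot_gfx_mhz() matters because the FILE handle stays open across sampling intervals: rewind() repositions the stream, but stdio may still satisfy the next fscanf() from its old buffer, reporting a stale frequency. Calling fflush() on the seekable input stream discards that buffer so the value is re-read from sysfs. A standalone illustration (same sysfs path as the hunk):

    #include <stdio.h>

    /* Re-read a sysfs attribute through a persistent FILE handle. */
    static int read_gfx_mhz(void)
    {
            static FILE *fp;
            int mhz = -1;

            if (!fp)
                    fp = fopen("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
            else {
                    rewind(fp);
                    fflush(fp);     /* drop cached bytes; fetch fresh data */
            }
            if (fp && fscanf(fp, "%d", &mhz) != 1)
                    mhz = -1;
            return mhz;
    }
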
@@ -3111,7 +3113,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return 0;
 
        fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
-                       "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n",
+                       "(high %d guar %d eff %d low %d)\n",
                        cpu, msr,
                        (unsigned int)HWP_HIGHEST_PERF(msr),
                        (unsigned int)HWP_GUARANTEED_PERF(msr),
@@ -3122,7 +3124,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return 0;
 
        fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
-                       "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n",
+                       "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n",
                        cpu, msr,
                        (unsigned int)(((msr) >> 0) & 0xff),
                        (unsigned int)(((msr) >> 8) & 0xff),
@@ -3136,7 +3138,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        return 0;
 
                fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
-                       "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n",
+                       "(min %d max %d des %d epp 0x%x window 0x%x)\n",
                        cpu, msr,
                        (unsigned int)(((msr) >> 0) & 0xff),
                        (unsigned int)(((msr) >> 8) & 0xff),
@@ -3353,17 +3355,19 @@ void rapl_probe(unsigned int family, unsigned int model)
        case INTEL_FAM6_SKYLAKE_DESKTOP:        /* SKL */
        case INTEL_FAM6_KABYLAKE_MOBILE:        /* KBL */
        case INTEL_FAM6_KABYLAKE_DESKTOP:       /* KBL */
-               do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
                BIC_PRESENT(BIC_PKG__);
                BIC_PRESENT(BIC_RAM__);
                if (rapl_joules) {
                        BIC_PRESENT(BIC_Pkg_J);
                        BIC_PRESENT(BIC_Cor_J);
                        BIC_PRESENT(BIC_RAM_J);
+                       BIC_PRESENT(BIC_GFX_J);
                } else {
                        BIC_PRESENT(BIC_PkgWatt);
                        BIC_PRESENT(BIC_CorWatt);
                        BIC_PRESENT(BIC_RAMWatt);
+                       BIC_PRESENT(BIC_GFXWatt);
                }
                break;
        case INTEL_FAM6_HASWELL_X:      /* HSX */
@@ -3478,7 +3482,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
        unsigned long long msr;
-       unsigned int dts;
+       unsigned int dts, dts2;
        int cpu;
 
        if (!(do_dts || do_ptm))
@@ -3503,7 +3507,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
                        cpu, msr, tcc_activation_temp - dts);
 
-#ifdef THERM_DEBUG
                if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
                        return 0;
 
@@ -3511,11 +3514,10 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                dts2 = (msr >> 8) & 0x7F;
                fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
                        cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
        }
 
 
-       if (do_dts) {
+       if (do_dts && debug) {
                unsigned int resolution;
 
                if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
@@ -3526,7 +3528,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
                        cpu, msr, tcc_activation_temp - dts, resolution);
 
-#ifdef THERM_DEBUG
                if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
                        return 0;
 
@@ -3534,7 +3535,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                dts2 = (msr >> 8) & 0x7F;
                fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
                        cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
        }
 
        return 0;
@@ -4578,7 +4578,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 17.02.24"
+       fprintf(outf, "turbostat version 17.04.12"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
index a0aa2009b0e0a81e65672eb795039a2172484c55..20f1871874df54a7d567797c753e379a760e492c 100644 (file)
@@ -282,7 +282,7 @@ static void test_arraymap_percpu(int task, void *data)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        int key, next_key, fd, i;
-       long values[nr_cpus];
+       long long values[nr_cpus];
 
        fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
                            sizeof(values[0]), 2, 0);
@@ -340,7 +340,7 @@ static void test_arraymap_percpu_many_keys(void)
         * allocator more than anything else
         */
        unsigned int nr_keys = 2000;
-       long values[nr_cpus];
+       long long values[nr_cpus];
        int key, fd, i;
 
        fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
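
The long to long long switch reflects how per-CPU map values cross the syscall boundary: the kernel copies round_up(value_size, 8) bytes per possible CPU, so on a 32-bit build a 4-byte long per slot under-sizes the buffer and every CPU after the first reads garbage. A sketch of a correctly sized lookup, using the same helper the test calls:

    #include <bpf/bpf.h>

    /* One 8-byte slot per possible CPU, independent of sizeof(long). */
    static int lookup_percpu(int map_fd, int key, unsigned int nr_cpus)
    {
            long long values[nr_cpus];

            return bpf_map_lookup_elem(map_fd, &key, values);
    }
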
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
new file mode 100644 (file)
index 0000000..bab5ff7
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
@@ -0,0 +1,117 @@
+#!/bin/sh
+# description: ftrace - function pid filters
+
+# Make sure that function pid matching filter works.
+# Also test it on an instance directory
+
+if ! grep -q function available_tracers; then
+    echo "no function tracer configured"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_pid ]; then
+    echo "set_ftrace_pid not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_filter ]; then
+    echo "set_ftrace_filter not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+do_function_fork=1
+
+if [ ! -f options/function-fork ]; then
+    do_function_fork=0
+    echo "no option for function-fork found. Option will not be tested."
+fi
+
+read PID _ < /proc/self/stat
+
+if [ $do_function_fork -eq 1 ]; then
+    # default value of function-fork option
+    orig_value=`grep function-fork trace_options`
+fi
+
+do_reset() {
+    reset_tracer
+    clear_trace
+    enable_tracing
+    echo > set_ftrace_filter
+    echo > set_ftrace_pid
+
+    if [ $do_function_fork -eq 0 ]; then
+       return
+    fi
+
+    echo $orig_value > trace_options
+}
+
+fail() { # msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+yield() {
+    ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
+}
+
+do_test() {
+    disable_tracing
+
+    echo do_execve* > set_ftrace_filter
+    echo *do_fork >> set_ftrace_filter
+
+    echo $PID > set_ftrace_pid
+    echo function > current_tracer
+
+    if [ $do_function_fork -eq 1 ]; then
+       # don't allow children to be traced
+       echo nofunction-fork > trace_options
+    fi
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should be 0
+    if [ $count_pid -eq 0 -o $count_other -ne 0 ]; then
+       fail "PID filtering not working?"
+    fi
+
+    disable_tracing
+    clear_trace
+
+    if [ $do_function_fork -eq 0 ]; then
+       return
+    fi
+
+    # allow children to be traced
+    echo function-fork > trace_options
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should NOT be 0
+    if [ $count_pid -eq 0 -o $count_other -eq 0 ]; then
+       fail "PID filtering not following fork?"
+    fi
+}
+
+do_test
+
+mkdir instances/foo
+cd instances/foo
+do_test
+cd ../../
+rmdir instances/foo
+
+do_reset
+
+exit 0
index 4124593696862fcb370fee15bcf7e34e11cdf9e0..e62bb354820cacdc653a19db385e3bd497931d0b 100644 (file)
@@ -75,7 +75,7 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
 {
        int fd, val;
 
-       fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
+       fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
        if (fd < 0) {
                perror("socket packet");
                exit(1);
@@ -95,6 +95,24 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
        return fd;
 }
 
+static void sock_fanout_set_cbpf(int fd)
+{
+       struct sock_filter bpf_filter[] = {
+               BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 80),           /* ldb [80] */
+               BPF_STMT(BPF_RET+BPF_A, 0),                   /* ret A */
+       };
+       struct sock_fprog bpf_prog;
+
+       bpf_prog.filter = bpf_filter;
+       bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
+
+       if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog,
+                      sizeof(bpf_prog))) {
+               perror("fanout data cbpf");
+               exit(1);
+       }
+}
+
 static void sock_fanout_set_ebpf(int fd)
 {
        const int len_off = __builtin_offsetof(struct __sk_buff, len);
@@ -270,7 +288,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
                exit(1);
        }
        if (type == PACKET_FANOUT_CBPF)
-               sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+               sock_fanout_set_cbpf(fds[0]);
        else if (type == PACKET_FANOUT_EBPF)
                sock_fanout_set_ebpf(fds[0]);
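
The new sock_fanout_set_cbpf() is needed because PACKET_FANOUT_DATA gives a classic BPF program different semantics from SO_ATTACH_FILTER: the returned accumulator selects which fanout-group member receives the packet (modulo the group size) instead of truncating it, which is why the program just loads one payload byte and returns it. Together with the switch to SOCK_RAW above, offset 80 is now measured from the link-layer header. A compilable sketch of the same setup (fd is assumed to be a PF_PACKET socket already joined to a fanout group):

    #include <linux/filter.h>
    #include <linux/if_packet.h>
    #include <sys/socket.h>

    /* Steer packets across a fanout group by one payload byte. */
    static int set_cbpf_steering(int fd)
    {
            struct sock_filter prog[] = {
                    BPF_STMT(BPF_LD + BPF_B + BPF_ABS, 80),   /* A = byte at offset 80 */
                    BPF_STMT(BPF_RET + BPF_A, 0),             /* member index = A */
            };
            struct sock_fprog fprog = {
                    .len = sizeof(prog) / sizeof(prog[0]),
                    .filter = prog,
            };

            return setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
                              &fprog, sizeof(fprog));
    }
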
 
index a77da88bf9469d515713e1d1f0f6d318544ac458..7d990d6c861b5863f23fe1d29523707c48ac28d5 100644 (file)
@@ -38,7 +38,7 @@
 # define __maybe_unused                __attribute__ ((__unused__))
 #endif
 
-static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
+static __maybe_unused void pair_udp_setfilter(int fd)
 {
        /* the filter below checks for all of the following conditions that
         * are based on the contents of create_payload()
@@ -76,23 +76,16 @@ static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
        };
        struct sock_fprog bpf_prog;
 
-       if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
-               bpf_filter[5].code = 0x16;   /* RET A                         */
-
        bpf_prog.filter = bpf_filter;
        bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
-       if (setsockopt(fd, lvl, optnum, &bpf_prog,
+
+       if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
                       sizeof(bpf_prog))) {
                perror("setsockopt SO_ATTACH_FILTER");
                exit(1);
        }
 }
 
-static __maybe_unused void pair_udp_setfilter(int fd)
-{
-       sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
-}
-
 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 {
        struct sockaddr_in saddr, daddr;