Merge branches 'acpi-scan', 'acpi-tables' and 'acpi-platform'
author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>
		Fri, 28 Apr 2017 21:17:36 +0000 (23:17 +0200)
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
		Fri, 28 Apr 2017 21:17:36 +0000 (23:17 +0200)
* acpi-scan:
  ACPI / scan: Avoid enumerating devices more than once
  ACPI / scan: Apply default enumeration to devices with ACPI drivers
  ACPI / scan: Drop support for force_remove

* acpi-tables:
  ACPI / tables: Drop acpi_parse_entries() which is not used

* acpi-platform:
  ACPI / platform: Update platform device NUMA node based on _PXM method

102 files changed:
Documentation/ABI/obsolete/sysfs-firmware-acpi [new file with mode: 0644]
Documentation/ABI/testing/sysfs-firmware-acpi
Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
MAINTAINERS
Makefile
arch/parisc/include/asm/uaccess.h
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/s390/include/asm/pgtable.h
arch/sparc/Kconfig
arch/sparc/mm/hugetlbpage.c
arch/x86/kernel/cpu/mcheck/mce-genpool.c
arch/x86/kernel/cpu/mcheck/mce-internal.h
arch/x86/kernel/cpu/mcheck/mce.c
block/blk-mq.c
block/elevator.c
crypto/ahash.c
crypto/algif_aead.c
crypto/lrw.c
crypto/xts.c
drivers/acpi/acpi_platform.c
drivers/acpi/internal.h
drivers/acpi/power.c
drivers/acpi/scan.c
drivers/acpi/sysfs.c
drivers/block/mtip32xx/mtip32xx.c
drivers/clk/clk-stm32f4.c
drivers/clk/sunxi-ng/Kconfig
drivers/clk/sunxi-ng/ccu-sun8i-a33.c
drivers/clk/sunxi-ng/ccu_common.c
drivers/clk/sunxi-ng/ccu_common.h
drivers/hid/wacom_wac.c
drivers/input/mouse/elantech.c
drivers/mmc/core/sdio_bus.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mtd/ubi/upd.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/phy/dp83640.c
drivers/net/phy/micrel.c
drivers/net/usb/ch9200.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/kaweth.c
drivers/net/usb/lan78xx.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/sr9700.c
drivers/net/vrf.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/pci/dwc/pcie-hisi.c
drivers/video/backlight/pwm_bl.c
fs/cifs/smb1ops.c
fs/nfsd/nfs4proc.c
fs/nsfs.c
fs/ubifs/debug.c
fs/ubifs/dir.c
include/crypto/internal/hash.h
include/linux/mmc/sdio_func.h
include/uapi/linux/ipv6_route.h
kernel/bpf/syscall.c
kernel/irq/affinity.c
kernel/locking/lockdep_internals.h
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
lib/Kconfig.debug
mm/migrate.c
mm/page_alloc.c
mm/vmstat.c
net/core/netpoll.c
net/core/skbuff.c
net/ipv4/ip_sockglue.c
net/ipv4/ipmr.c
net/ipv4/raw.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/ip6_input.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/ipv6/seg6.c
net/key/af_key.c
net/mac80211/rx.c
net/qrtr/qrtr.c
net/sched/act_api.c
security/keys/gc.c
security/keys/keyctl.c
security/keys/process_keys.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc [new file with mode: 0644]
tools/testing/selftests/net/psock_fanout.c
tools/testing/selftests/net/psock_lib.h

diff --git a/Documentation/ABI/obsolete/sysfs-firmware-acpi b/Documentation/ABI/obsolete/sysfs-firmware-acpi
new file mode 100644 (file)
index 0000000..6715a71
--- /dev/null
@@ -0,0 +1,8 @@
+What:          /sys/firmware/acpi/hotplug/force_remove
+Date:          Mar 2017
+Contact:       Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Description:
+               Since force_remove is inherently broken and dangerous to
+               use for some hotpluggable resources like memory (because
+               ignoring the offline failure might lead to memory
+               corruption and crashes), enabling this knob is not safe
+               and thus unsupported.
index c7fc72d4495ca0cbd2203e40cfba522bdb2fcfa8..613f42a9d5cdcd7b6a9f754b59387065daa1576f 100644 (file)
@@ -44,16 +44,6 @@ Description:
                or 0 (unset).  Attempts to write any other values to it will
                cause -EINVAL to be returned.
 
-What:          /sys/firmware/acpi/hotplug/force_remove
-Date:          May 2013
-Contact:       Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-Description:
-               The number in this file (0 or 1) determines whether (1) or not
-               (0) the ACPI subsystem will allow devices to be hot-removed even
-               if they cannot be put offline gracefully (from the kernel's
-               viewpoint).  That number can be changed by writing a boolean
-               value to this file.
-
 What:          /sys/firmware/acpi/interrupts/
 Date:          February 2008
 Contact:       Len Brown <lenb@kernel.org>
index b7fa3b97986d5fff391bf4ff1a01bdd25ce096e9..a339dbb154933282ee06aaeec0571e0d95a72b42 100644 (file)
@@ -44,13 +44,19 @@ Hip05 Example (note that Hip06 is the same except compatible):
        };
 
 HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description.
+
+Some BIOSes place the host controller in a mode where it is ECAM
+compliant for all devices other than the root complex. In such cases,
+the host controller should be described as below.
+
 The properties and their meanings are identical to those described in
 host-generic-pci.txt except as listed below.
 
 Properties of the host controller node that differ from
 host-generic-pci.txt:
 
-- compatible     : Must be "hisilicon,pcie-almost-ecam"
+- compatible     : Must be "hisilicon,hip06-pcie-ecam", or
+                  "hisilicon,hip07-pcie-ecam"
 
 - reg            : Two entries: First the ECAM configuration space for any
                   other bus underneath the root bus. Second, the base
@@ -59,7 +65,7 @@ host-generic-pci.txt:
 
 Example:
        pcie0: pcie@a0090000 {
-               compatible = "hisilicon,pcie-almost-ecam";
+               compatible = "hisilicon,hip06-pcie-ecam";
                reg = <0 0xb0000000 0 0x2000000>,  /*  ECAM configuration space */
                      <0 0xa0090000 0 0x10000>; /* host bridge registers */
                bus-range = <0  31>;
index 676c139bc883ef7906e854174b8177ef282e0c51..38d3e4ed7208bb3969abcbfc76c4e68fd90d12f5 100644 (file)
@@ -2585,12 +2585,26 @@ F:      include/uapi/linux/if_bonding.h
 
 BPF (Safe dynamic programs and tools)
 M:     Alexei Starovoitov <ast@kernel.org>
+M:     Daniel Borkmann <daniel@iogearbox.net>
 L:     netdev@vger.kernel.org
 L:     linux-kernel@vger.kernel.org
 S:     Supported
+F:     arch/x86/net/bpf_jit*
+F:     Documentation/networking/filter.txt
+F:     include/linux/bpf*
+F:     include/linux/filter.h
+F:     include/uapi/linux/bpf*
+F:     include/uapi/linux/filter.h
 F:     kernel/bpf/
-F:     tools/testing/selftests/bpf/
+F:     kernel/trace/bpf_trace.c
 F:     lib/test_bpf.c
+F:     net/bpf/
+F:     net/core/filter.c
+F:     net/sched/act_bpf.c
+F:     net/sched/cls_bpf.c
+F:     samples/bpf/
+F:     tools/net/bpf*
+F:     tools/testing/selftests/bpf/
 
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:     Michael Chan <michael.chan@broadcom.com>
@@ -8761,6 +8775,7 @@ W:        http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+B:     mailto:netdev@vger.kernel.org
 S:     Maintained
 F:     net/
 F:     include/net/
@@ -12464,7 +12479,6 @@ F:      drivers/clk/ti/
 F:     include/linux/clk/ti.h
 
 TI ETHERNET SWITCH DRIVER (CPSW)
-M:     Mugunthan V N <mugunthanvnm@ti.com>
 R:     Grygorii Strashko <grygorii.strashko@ti.com>
 L:     linux-omap@vger.kernel.org
 L:     netdev@vger.kernel.org
index 5039b9148d15de5762a2a33147535569d5be5746..77930269545306e2a2e8e3b0d69900ab1dfec9c1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 8442727f28d2732eecb583d46bdcc88258a590dd..cbd4f4af8108bc8fa9ec36504589eac7107e6b1c 100644 (file)
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr)          __get_user_asm64(ptr)
+#define LDD_USER(val, ptr)     __get_user_asm64(val, ptr)
 #define STD_USER(x, ptr)       __put_user_asm64(x, ptr)
 #else
-#define LDD_USER(ptr)          __get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr)     __get_user_asm(val, "ldd", ptr)
 #define STD_USER(x, ptr)       __put_user_asm("std", x, ptr)
 #endif
 
@@ -97,63 +97,87 @@ struct exception_data {
                " mtsp %0,%%sr2\n\t"            \
                : : "r"(get_fs()) : )
 
-#define __get_user(x, ptr)                               \
-({                                                       \
-       register long __gu_err __asm__ ("r8") = 0;       \
-       register long __gu_val;                          \
-                                                        \
-       load_sr2();                                      \
-       switch (sizeof(*(ptr))) {                        \
-           case 1: __get_user_asm("ldb", ptr); break;   \
-           case 2: __get_user_asm("ldh", ptr); break;   \
-           case 4: __get_user_asm("ldw", ptr); break;   \
-           case 8: LDD_USER(ptr);  break;               \
-           default: BUILD_BUG(); break;                 \
-       }                                                \
-                                                        \
-       (x) = (__force __typeof__(*(ptr))) __gu_val;     \
-       __gu_err;                                        \
+#define __get_user_internal(val, ptr)                  \
+({                                                     \
+       register long __gu_err __asm__ ("r8") = 0;      \
+                                                       \
+       switch (sizeof(*(ptr))) {                       \
+       case 1: __get_user_asm(val, "ldb", ptr); break; \
+       case 2: __get_user_asm(val, "ldh", ptr); break; \
+       case 4: __get_user_asm(val, "ldw", ptr); break; \
+       case 8: LDD_USER(val, ptr); break;              \
+       default: BUILD_BUG();                           \
+       }                                               \
+                                                       \
+       __gu_err;                                       \
 })
 
-#define __get_user_asm(ldx, ptr)                        \
+#define __get_user(val, ptr)                           \
+({                                                     \
+       load_sr2();                                     \
+       __get_user_internal(val, ptr);                  \
+})
+
+#define __get_user_asm(val, ldx, ptr)                  \
+{                                                      \
+       register long __gu_val;                         \
+                                                       \
        __asm__("1: " ldx " 0(%%sr2,%2),%0\n"           \
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err));
+               : "r"(ptr), "1"(__gu_err));             \
+                                                       \
+       (val) = (__force __typeof__(*(ptr))) __gu_val;  \
+}
 
 #if !defined(CONFIG_64BIT)
 
-#define __get_user_asm64(ptr)                          \
+#define __get_user_asm64(val, ptr)                     \
+{                                                      \
+       union {                                         \
+               unsigned long long      l;              \
+               __typeof__(*(ptr))      t;              \
+       } __gu_tmp;                                     \
+                                                       \
        __asm__("   copy %%r0,%R0\n"                    \
                "1: ldw 0(%%sr2,%2),%0\n"               \
                "2: ldw 4(%%sr2,%2),%R0\n"              \
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
-               : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err));
+               : "=&r"(__gu_tmp.l), "=r"(__gu_err)     \
+               : "r"(ptr), "1"(__gu_err));             \
+                                                       \
+       (val) = __gu_tmp.t;                             \
+}
 
 #endif /* !defined(CONFIG_64BIT) */
 
 
-#define __put_user(x, ptr)                                      \
+#define __put_user_internal(x, ptr)                            \
 ({                                                             \
        register long __pu_err __asm__ ("r8") = 0;              \
         __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);      \
                                                                \
-       load_sr2();                                             \
        switch (sizeof(*(ptr))) {                               \
-           case 1: __put_user_asm("stb", __x, ptr); break;     \
-           case 2: __put_user_asm("sth", __x, ptr); break;     \
-           case 4: __put_user_asm("stw", __x, ptr); break;     \
-           case 8: STD_USER(__x, ptr); break;                  \
-           default: BUILD_BUG(); break;                        \
-       }                                                       \
+       case 1: __put_user_asm("stb", __x, ptr); break;         \
+       case 2: __put_user_asm("sth", __x, ptr); break;         \
+       case 4: __put_user_asm("stw", __x, ptr); break;         \
+       case 8: STD_USER(__x, ptr); break;                      \
+       default: BUILD_BUG();                                   \
+       }                                                       \
                                                                \
        __pu_err;                                               \
 })
 
+#define __put_user(x, ptr)                                     \
+({                                                             \
+       load_sr2();                                             \
+       __put_user_internal(x, ptr);                            \
+})
+
+
 /*
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
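The union in __get_user_asm64() is the key trick here: the two ldw results land in a single unsigned long long, which can then be assigned back to (val) whatever its 8-byte type happens to be, without an invalid cast. A minimal user-space sketch of the same type punning, with illustrative names (only the union idea is taken from the patch):

    #include <stdio.h>

    /*
     * Pun a raw 64-bit value into a destination lvalue of any 8-byte
     * type, the way __get_user_asm64() hands its load result to (val).
     */
    #define ASSIGN_PUNNED64(val, raw)                       \
    do {                                                    \
            union {                                         \
                    unsigned long long      l;              \
                    __typeof__(val)         t;              \
            } tmp;                                          \
            tmp.l = (raw);                                  \
            (val) = tmp.t;                                  \
    } while (0)

    int main(void)
    {
            double d;

            ASSIGN_PUNNED64(d, 0x3ff0000000000000ULL); /* IEEE-754 bits of 1.0 */
            printf("%f\n", d);                         /* prints 1.000000 */
            return 0;
    }
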
index 14752eee3d0c44816a61b395970186c1e56d4a21..ed3beadd2cc515d1b8a99b4836b3f06c1a97a87c 100644 (file)
@@ -236,9 +236,9 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
        mtctr   reg;                                                    \
        bctr
 
-#define BRANCH_LINK_TO_FAR(reg, label)                                 \
-       __LOAD_FAR_HANDLER(reg, label);                                 \
-       mtctr   reg;                                                    \
+#define BRANCH_LINK_TO_FAR(label)                                      \
+       __LOAD_FAR_HANDLER(r12, label);                                 \
+       mtctr   r12;                                                    \
        bctrl
 
 /*
@@ -265,7 +265,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define BRANCH_TO_COMMON(reg, label)                                   \
        b       label
 
-#define BRANCH_LINK_TO_FAR(reg, label)                                 \
+#define BRANCH_LINK_TO_FAR(label)                                      \
        bl      label
 
 #define BRANCH_TO_KVM(reg, label)                                      \
index 6432d4bf08c889c128803cc5505546122118b964..767ef6d68c9ebf379dad6f065bcc0279a3021bb9 100644 (file)
@@ -689,7 +689,7 @@ resume_kernel:
 
        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
 
-       lwz     r3,GPR1(r1)
+       ld      r3,GPR1(r1)
        subi    r3,r3,INT_FRAME_SIZE    /* dst: Allocate a trampoline exception frame */
        mr      r4,r1                   /* src:  current exception frame */
        mr      r1,r3                   /* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@ resume_kernel:
        addi    r6,r6,8
        bdnz    2b
 
-       /* Do real store operation to complete stwu */
-       lwz     r5,GPR1(r1)
+       /* Do real store operation to complete stdu */
+       ld      r5,GPR1(r1)
        std     r8,0(r5)
 
        /* Clear _TIF_EMULATE_STACK_STORE flag */
index 857bf7c5b9465aff743b3e62094cc47d808c8c92..6353019966e6a3bd15afa32d7318c47810a1d8e9 100644 (file)
@@ -982,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
        EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
        EXCEPTION_PROLOG_COMMON_3(0xe60)
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode)
+       BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
        /* Windup the stack. */
        /* Move original HSRR0 and HSRR1 into the respective regs */
        ld      r9,_MSR(r1)
index 93e37b12e88237766821369e19827e5e2d844a1b..ecec682bb5166a41d9c86025fd3f4e9461f6ec71 100644 (file)
@@ -1051,6 +1051,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
        if (!MACHINE_HAS_NX)
                pte_val(entry) &= ~_PAGE_NOEXEC;
+       if (pte_present(entry))
+               pte_val(entry) &= ~_PAGE_UNUSED;
        if (mm_has_pgste(mm))
                ptep_set_pte_at(mm, addr, ptep, entry);
        else
index 68ac5c7cd982619581dfa65b75ac89aa8e359554..a59deaef21e5c7be78ba239b69dc2baecb69e8cb 100644 (file)
@@ -43,7 +43,7 @@ config SPARC
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
        select HAVE_ARCH_HARDENED_USERCOPY
-       select PROVE_LOCKING_SMALL if PROVE_LOCKING
+       select LOCKDEP_SMALL if LOCKDEP
        select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
index ee5273ad918de6302cb0f6644b2a4179f8c26351..7c29d38e6b99c68c5ac746eb3d3a3fe1da8394a5 100644 (file)
@@ -461,6 +461,22 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
        pgd_t *pgd;
        unsigned long next;
 
+       addr &= PMD_MASK;
+       if (addr < floor) {
+               addr += PMD_SIZE;
+               if (!addr)
+                       return;
+       }
+       if (ceiling) {
+               ceiling &= PMD_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               end -= PMD_SIZE;
+       if (addr > end - 1)
+               return;
+
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
index 1e5a50c11d3c3546a59d286cbb84c402682cfb2b..217cd4449bc9db3aedfd6367529bc9ca99fb8443 100644 (file)
@@ -85,7 +85,7 @@ void mce_gen_pool_process(struct work_struct *__unused)
        head = llist_reverse_order(head);
        llist_for_each_entry_safe(node, tmp, head, llnode) {
                mce = &node->mce;
-               atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+               blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
                gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
        }
 }
index 903043e6a62b36a2c395c7aab52da6f85e34fc11..19592ba1a320030a4cbdc59aa8194f14bcefae69 100644 (file)
@@ -13,7 +13,7 @@ enum severity_level {
        MCE_PANIC_SEVERITY,
 };
 
-extern struct atomic_notifier_head x86_mce_decoder_chain;
+extern struct blocking_notifier_head x86_mce_decoder_chain;
 
 #define ATTR_LEN               16
 #define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */
index 5accfbdee3f06fac48eaf4423d66d7bee1e3a0a3..af44ebeb593fb60f70bb497de3e8f09b4b988333 100644 (file)
@@ -123,7 +123,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
  */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
 
 /* Do initial initialization of a struct mce */
 void mce_setup(struct mce *m)
@@ -220,7 +220,7 @@ void mce_register_decode_chain(struct notifier_block *nb)
 
        WARN_ON(nb->priority > MCE_PRIO_LOWEST && nb->priority < MCE_PRIO_EDAC);
 
-       atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+       blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
 }
 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 
@@ -228,7 +228,7 @@ void mce_unregister_decode_chain(struct notifier_block *nb)
 {
        atomic_dec(&num_notifiers);
 
-       atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
+       blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
 }
 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
 
@@ -321,18 +321,7 @@ static void __print_mce(struct mce *m)
 
 static void print_mce(struct mce *m)
 {
-       int ret = 0;
-
        __print_mce(m);
-
-       /*
-        * Print out human-readable details about the MCE error,
-        * (if the CPU has an implementation for that)
-        */
-       ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
-       if (ret == NOTIFY_STOP)
-               return;
-
        pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
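Making x86_mce_decoder_chain a blocking notifier moves the decode callbacks out of atomic context, so a registered decoder may now take sleeping locks. A hedged sketch of a consumer relying on that; everything except mce_register_decode_chain() and MCE_PRIO_LOWEST is illustrative:

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/notifier.h>
    #include <asm/mce.h>

    static DEFINE_MUTEX(my_decode_lock);

    static int my_mce_decode(struct notifier_block *nb,
                             unsigned long val, void *data)
    {
            struct mce *m = data;

            /* Sleeping locks are fine now that the chain is blocking. */
            mutex_lock(&my_decode_lock);
            pr_info("decoded MCE: bank %d status 0x%llx\n",
                    m->bank, m->status);
            mutex_unlock(&my_decode_lock);

            return NOTIFY_OK;
    }

    static struct notifier_block my_mce_nb = {
            .notifier_call  = my_mce_decode,
            .priority       = MCE_PRIO_LOWEST,
    };

    /* from module init: mce_register_decode_chain(&my_mce_nb); */
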
 
index 572966f495966b7fe8ce486ad4e5912a0d044eb5..c7836a1ded973e23026143edb95e36a4e86ad294 100644 (file)
@@ -2928,8 +2928,17 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
        hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
        if (!blk_qc_t_is_internal(cookie))
                rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-       else
+       else {
                rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
+               /*
+                * With scheduling, if the request has completed, we'll
+                * get a NULL return here, as we clear the sched tag when
+                * that happens. The request still remains valid, like always,
+                * so we should be safe with just the NULL check.
+                */
+               if (!rq)
+                       return false;
+       }
 
        return __blk_mq_poll(hctx, rq);
 }
index dbeecf7be719eaada4457ed3c7ac799fd0bbacec..4d9084a14c1093b1c00d79f079663a643c5d05fe 100644 (file)
@@ -1098,12 +1098,20 @@ int elevator_change(struct request_queue *q, const char *name)
 }
 EXPORT_SYMBOL(elevator_change);
 
+static inline bool elv_support_iosched(struct request_queue *q)
+{
+       if (q->mq_ops && q->tag_set && (q->tag_set->flags &
+                               BLK_MQ_F_NO_SCHED))
+               return false;
+       return true;
+}
+
 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
 {
        int ret;
 
-       if (!(q->mq_ops || q->request_fn))
+       if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
                return count;
 
        ret = __elevator_change(q, name);
@@ -1135,7 +1143,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                        continue;
                }
-               if (__e->uses_mq && q->mq_ops)
+               if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
                        len += sprintf(name+len, "%s ", __e->elevator_name);
                else if (!__e->uses_mq && !q->mq_ops)
                        len += sprintf(name+len, "%s ", __e->elevator_name);
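elv_support_iosched() turns BLK_MQ_F_NO_SCHED into a driver-visible opt-out: a blk-mq queue whose tag set carries the flag neither advertises nor accepts I/O schedulers through sysfs (mtip32xx starts setting it further down in this commit). A hedged sketch of the driver side; names and sizes are illustrative, and a real tag set also needs .ops and .cmd_size:

    #include <linux/blk-mq.h>

    static struct blk_mq_tag_set my_tag_set;

    static int my_driver_init_tags(void)
    {
            my_tag_set.nr_hw_queues = 1;
            my_tag_set.queue_depth  = 64;
            my_tag_set.numa_node    = NUMA_NO_NODE;
            /* NO_SCHED: elv_iosched_store() ignores scheduler writes and
             * elv_iosched_show() stops listing mq schedulers for us. */
            my_tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;

            return blk_mq_alloc_tag_set(&my_tag_set);
    }
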
index e58c4970c22b7cc1cdb5e8f08875d9c1e7714568..826cd7ab4d4a2ec830438b40e987e230bd03f32f 100644 (file)
@@ -32,6 +32,7 @@ struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
+       u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -253,6 +254,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
+       priv->flags = req->base.flags;
+
        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
@@ -267,38 +270,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
        return 0;
 }
 
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
 {
        struct ahash_request_priv *priv = req->priv;
 
+       if (!err)
+               memcpy(priv->result, req->result,
+                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
        /* Restore the original crypto request. */
        req->result = priv->result;
-       req->base.complete = priv->complete;
-       req->base.data = priv->data;
+
+       ahash_request_set_callback(req, priv->flags,
+                                  priv->complete, priv->data);
        req->priv = NULL;
 
        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
 }
 
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
 {
        struct ahash_request_priv *priv = req->priv;
+       struct crypto_async_request oreq;
 
-       if (err == -EINPROGRESS)
-               return;
-
-       if (!err)
-               memcpy(priv->result, req->result,
-                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+       oreq.data = priv->data;
 
-       ahash_restore_req(req);
+       priv->complete(&oreq, -EINPROGRESS);
 }
 
 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 {
        struct ahash_request *areq = req->data;
 
+       if (err == -EINPROGRESS) {
+               ahash_notify_einprogress(areq);
+               return;
+       }
+
        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
@@ -309,7 +318,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
         */
 
        /* First copy req->result into req->priv.result */
-       ahash_op_unaligned_finish(areq, err);
+       ahash_restore_req(areq, err);
 
        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
@@ -325,7 +334,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
                return err;
 
        err = op(req);
-       ahash_op_unaligned_finish(req, err);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
+
+       ahash_restore_req(req, err);
 
        return err;
 }
@@ -360,25 +374,14 @@ int crypto_ahash_digest(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
 {
-       struct ahash_request_priv *priv = req->priv;
+       struct ahash_request *areq = req->data;
 
        if (err == -EINPROGRESS)
                return;
 
-       if (!err)
-               memcpy(priv->result, req->result,
-                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-       ahash_restore_req(req);
-}
-
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
-{
-       struct ahash_request *areq = req->data;
-
-       ahash_def_finup_finish2(areq, err);
+       ahash_restore_req(areq, err);
 
        areq->base.complete(&areq->base, err);
 }
@@ -389,11 +392,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
                goto out;
 
        req->base.complete = ahash_def_finup_done2;
-       req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
        err = crypto_ahash_reqtfm(req)->final(req);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
 
 out:
-       ahash_def_finup_finish2(req, err);
+       ahash_restore_req(req, err);
        return err;
 }
 
@@ -401,7 +408,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
 {
        struct ahash_request *areq = req->data;
 
+       if (err == -EINPROGRESS) {
+               ahash_notify_einprogress(areq);
+               return;
+       }
+
+       areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
        err = ahash_def_finup_finish1(areq, err);
+       if (areq->priv)
+               return;
 
        areq->base.complete(&areq->base, err);
 }
@@ -416,6 +432,11 @@ static int ahash_def_finup(struct ahash_request *req)
                return err;
 
        err = tfm->update(req);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
+
        return ahash_def_finup_finish1(req, err);
 }
 
index 5a805375865731f4cc7e94790362c208dd58d05b..ef59d9926ee99bccfbdc6077451cb95008d07651 100644 (file)
@@ -40,6 +40,7 @@ struct aead_async_req {
        struct aead_async_rsgl first_rsgl;
        struct list_head list;
        struct kiocb *iocb;
+       struct sock *sk;
        unsigned int tsgls;
        char iv[];
 };
@@ -379,12 +380,10 @@ unlock:
 
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-       struct sock *sk = _req->data;
-       struct alg_sock *ask = alg_sk(sk);
-       struct aead_ctx *ctx = ask->private;
-       struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-       struct aead_request *req = aead_request_cast(_req);
+       struct aead_request *req = _req->data;
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+       struct sock *sk = areq->sk;
        struct scatterlist *sg = areq->tsgl;
        struct aead_async_rsgl *rsgl;
        struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
        memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
        INIT_LIST_HEAD(&areq->list);
        areq->iocb = msg->msg_iocb;
+       areq->sk = sk;
        memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
        aead_request_set_tfm(req, tfm);
        aead_request_set_ad(req, ctx->aead_assoclen);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                 aead_async_cb, sk);
+                                 aead_async_cb, req);
        used -= ctx->aead_assoclen;
 
        /* take over all tx sgls from ctx */
index 3ea095adafd9af0067f695c4c70e2af702f70923..a8bfae4451bfcbfd26e25bbb39280818dcc139d4 100644 (file)
@@ -345,6 +345,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -352,6 +359,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
@@ -389,6 +397,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -396,6 +411,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
index c976bfac29da526844f6a5578fea1455945c26f3..89ace5ebc2da88df3e049e89e1d2640b2e361e3d 100644 (file)
@@ -286,6 +286,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -293,6 +300,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
@@ -330,6 +338,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -337,6 +352,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
index 03250e1f11039b99e6a25e1650b9d67c3cee41c9..88cd949003f3e397175579621b2513da257d7895 100644 (file)
@@ -121,11 +121,14 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
        if (IS_ERR(pdev))
                dev_err(&adev->dev, "platform device creation failed: %ld\n",
                        PTR_ERR(pdev));
-       else
+       else {
+               set_dev_node(&pdev->dev, acpi_get_node(adev->handle));
                dev_dbg(&adev->dev, "created platform device %s\n",
                        dev_name(&pdev->dev));
+       }
 
        kfree(resources);
+
        return pdev;
 }
 EXPORT_SYMBOL_GPL(acpi_create_platform_device);
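With set_dev_node() called at creation time, dev_to_node() on an ACPI-enumerated platform device now reflects the firmware's _PXM proximity domain. A hedged sketch of a probe routine taking advantage of that; driver names are illustrative:

    #include <linux/platform_device.h>
    #include <linux/slab.h>

    static int my_probe(struct platform_device *pdev)
    {
            void *buf;

            /* dev_to_node() now reflects _PXM, so this buffer lands on
             * the device's local memory node. */
            buf = kmalloc_node(4096, GFP_KERNEL, dev_to_node(&pdev->dev));
            if (!buf)
                    return -ENOMEM;

            platform_set_drvdata(pdev, buf);
            return 0;
    }
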
index f15900132912a4349ecc5b6efe6a2d2e8ff6530f..66229ffa909b5f31029b275b912fb904c4a69770 100644 (file)
@@ -65,8 +65,6 @@ static inline void acpi_cmos_rtc_init(void) {}
 #endif
 int acpi_rev_override_setup(char *str);
 
-extern bool acpi_force_hot_remove;
-
 void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
                                    const char *name);
 int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
index fcd4ce6f78d5d387080343d96738c3b7275d0324..1c2b846c577604d0b471e87d19cecea6caa286f9 100644 (file)
@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
                return -EINVAL;
 
        /* The state of the list is 'on' IFF all resources are 'on'. */
+       cur_state = 0;
        list_for_each_entry(entry, list, node) {
                struct acpi_power_resource *resource = entry->resource;
                acpi_handle handle = resource->device.handle;
index 2433569b02ef5cf40dd82cebec6fc83186f4dc3b..c269310674158fa56104bdf4d7b4a04dcfda6221 100644 (file)
@@ -30,12 +30,6 @@ extern struct acpi_device *acpi_root;
 
 #define INVALID_ACPI_HANDLE    ((acpi_handle)empty_zero_page)
 
-/*
- * If set, devices will be hot-removed even if they cannot be put offline
- * gracefully (from the kernel's standpoint).
- */
-bool acpi_force_hot_remove;
-
 static const char *dummy_hid = "device";
 
 static LIST_HEAD(acpi_dep_list);
@@ -170,9 +164,6 @@ static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
                        pn->put_online = false;
                }
                ret = device_offline(pn->dev);
-               if (acpi_force_hot_remove)
-                       continue;
-
                if (ret >= 0) {
                        pn->put_online = !ret;
                } else {
@@ -241,11 +232,11 @@ static int acpi_scan_try_to_offline(struct acpi_device *device)
                acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
                                    NULL, acpi_bus_offline, (void *)true,
                                    (void **)&errdev);
-               if (!errdev || acpi_force_hot_remove)
+               if (!errdev)
                        acpi_bus_offline(handle, 0, (void *)true,
                                         (void **)&errdev);
 
-               if (errdev && !acpi_force_hot_remove) {
+               if (errdev) {
                        dev_warn(errdev, "Offline failed.\n");
                        acpi_bus_online(handle, 0, NULL, NULL);
                        acpi_walk_namespace(ACPI_TYPE_ANY, handle,
@@ -263,8 +254,7 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
        unsigned long long sta;
        acpi_status status;
 
-       if (device->handler && device->handler->hotplug.demand_offline
-           && !acpi_force_hot_remove) {
+       if (device->handler && device->handler->hotplug.demand_offline) {
                if (!acpi_scan_is_offline(device, true))
                        return -EBUSY;
        } else {
@@ -1850,6 +1840,8 @@ static void acpi_bus_attach(struct acpi_device *device)
                        device->flags.power_manageable = 0;
 
                device->flags.initialized = true;
+       } else if (device->flags.visited) {
+               goto ok;
        }
 
        ret = acpi_scan_attach_handler(device);
@@ -1866,10 +1858,10 @@ static void acpi_bus_attach(struct acpi_device *device)
        if (ret < 0)
                return;
 
-       if (ret > 0 || !device->pnp.type.platform_id)
-               acpi_device_set_enumerated(device);
-       else
+       if (device->pnp.type.platform_id)
                acpi_default_enumeration(device);
+       else
+               acpi_device_set_enumerated(device);
 
  ok:
        list_for_each_entry(child, &device->children, node)
index cf05ae973381178cc066e1c235c38afb419e1f01..1b5ee1e0e5a3073457b0b15da34aadf292a544ba 100644 (file)
@@ -921,7 +921,7 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
 static ssize_t force_remove_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
 {
-       return sprintf(buf, "%d\n", !!acpi_force_hot_remove);
+       return sprintf(buf, "%d\n", 0);
 }
 
 static ssize_t force_remove_store(struct kobject *kobj,
@@ -935,9 +935,10 @@ static ssize_t force_remove_store(struct kobject *kobj,
        if (ret < 0)
                return ret;
 
-       lock_device_hotplug();
-       acpi_force_hot_remove = val;
-       unlock_device_hotplug();
+       if (val) {
+               pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
+               return -EINVAL;
+       }
        return size;
 }
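The attribute thus stays visible for ABI compatibility but is pinned to 0: reads always report 0, and writing any non-zero value fails. A user-space sketch of the new contract:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/firmware/acpi/hotplug/force_remove", O_WRONLY);

            if (fd < 0)
                    return 1;
            /* "1" is now rejected with EINVAL; "0" still succeeds. */
            if (write(fd, "1", 1) < 0)
                    perror("force_remove"); /* expected: Invalid argument */
            close(fd);
            return 0;
    }
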
 
index f96ab717534c4c8fe60e830c4020c6bd0517cf07..1d1dc11aa5faef6a9422faeb0fa7bd3a2732e860 100644 (file)
@@ -3969,7 +3969,7 @@ static int mtip_block_initialize(struct driver_data *dd)
        dd->tags.reserved_tags = 1;
        dd->tags.cmd_size = sizeof(struct mtip_cmd);
        dd->tags.numa_node = dd->numa_node;
-       dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
+       dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
        dd->tags.driver_data = dd;
        dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
 
index ab609a76706f7bb0258ce47dda61366a0602d5cc..cf9449b3dbd9742bd8a3559c9939af9e057d9b5f 100644 (file)
@@ -429,6 +429,13 @@ static const struct clk_div_table pll_divp_table[] = {
        { 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 }
 };
 
+static const struct clk_div_table pll_divq_table[] = {
+       { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
+       { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, { 12, 12 }, { 13, 13 },
+       { 14, 14 }, { 15, 15 },
+       { 0 }
+};
+
 static const struct clk_div_table pll_divr_table[] = {
        { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
 };
@@ -496,9 +503,9 @@ struct stm32f4_div_data {
 
 #define MAX_PLL_DIV 3
 static const struct stm32f4_div_data  div_data[MAX_PLL_DIV] = {
-       { 16, 2, 0,                     pll_divp_table  },
-       { 24, 4, CLK_DIVIDER_ONE_BASED, NULL            },
-       { 28, 3, 0,                     pll_divr_table  },
+       { 16, 2, 0, pll_divp_table },
+       { 24, 4, 0, pll_divq_table },
+       { 28, 3, 0, pll_divr_table },
 };
 
 struct stm32f4_pll_data {
index 72109d2cf41b29da83dd88dbdb0e820364c893dd..1c23573010176b6a6ae1ae7d5bf578f56cd8350b 100644 (file)
@@ -1,6 +1,7 @@
 config SUNXI_CCU
        bool "Clock support for Allwinner SoCs"
        depends on ARCH_SUNXI || COMPILE_TEST
+       select RESET_CONTROLLER
        default ARCH_SUNXI
 
 if SUNXI_CCU
@@ -135,6 +136,7 @@ config SUN8I_V3S_CCU
 config SUN9I_A80_CCU
        bool "Support for the Allwinner A80 CCU"
        select SUNXI_CCU_DIV
+       select SUNXI_CCU_MULT
        select SUNXI_CCU_GATE
        select SUNXI_CCU_NKMP
        select SUNXI_CCU_NM
index a7b3c08ed0e232c0cf41ae419f4980bf1614590e..2c69b631967aea3ae81389d29b20e8014c3030e1 100644 (file)
@@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
        .num_resets     = ARRAY_SIZE(sun8i_a33_ccu_resets),
 };
 
+static struct ccu_pll_nb sun8i_a33_pll_cpu_nb = {
+       .common = &pll_cpux_clk.common,
+       /* copy from pll_cpux_clk */
+       .enable = BIT(31),
+       .lock   = BIT(28),
+};
+
 static struct ccu_mux_nb sun8i_a33_cpu_nb = {
        .common         = &cpux_clk.common,
        .cm             = &cpux_clk.mux,
@@ -783,6 +790,10 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
 
        sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
 
+       /* Gate then ungate PLL CPU after any rate changes */
+       ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
+
+       /* Reparent CPU during PLL CPU rate changes */
        ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
                                  &sun8i_a33_cpu_nb);
 }
index 8a47bafd78905bce849235d791f1469d448afcc9..9d8724715a4352ddd07a411945bc1cd58367053d 100644 (file)
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/iopoll.h>
 #include <linux/slab.h>
 
 #include "ccu_common.h"
+#include "ccu_gate.h"
 #include "ccu_reset.h"
 
 static DEFINE_SPINLOCK(ccu_lock);
@@ -39,6 +41,53 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
        WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
 }
 
+/*
+ * This clock notifier is called when the frequency of a PLL clock is
+ * changed. In common PLL designs, changes to the dividers take effect
+ * almost immediately, while changes to the multipliers (implemented
+ * as dividers in the feedback loop) take a few cycles to work into
+ * the feedback loop for the PLL to stabilize.
+ *
+ * Sometimes when the PLL clock rate is changed, the decrease in the
+ * divider is too much for the decrease in the multiplier to catch up.
+ * The PLL clock rate will spike, and in some cases, might lock up
+ * completely.
+ *
+ * This notifier callback will gate and then ungate the clock,
+ * effectively resetting it, so it proceeds to work. Care must be
+ * taken to reparent consumers to other temporary clocks during the
+ * rate change, and that this notifier callback must be the first
+ * to be registered.
+ */
+static int ccu_pll_notifier_cb(struct notifier_block *nb,
+                              unsigned long event, void *data)
+{
+       struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
+       int ret = 0;
+
+       if (event != POST_RATE_CHANGE)
+               goto out;
+
+       ccu_gate_helper_disable(pll->common, pll->enable);
+
+       ret = ccu_gate_helper_enable(pll->common, pll->enable);
+       if (ret)
+               goto out;
+
+       ccu_helper_wait_for_lock(pll->common, pll->lock);
+
+out:
+       return notifier_from_errno(ret);
+}
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
+{
+       pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;
+
+       return clk_notifier_register(pll_nb->common->hw.clk,
+                                    &pll_nb->clk_nb);
+}
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
                    const struct sunxi_ccu_desc *desc)
 {
index 73d81dc58fc5ad91f8a293530aa89e2b22fcbdb3..d6fdd7a789aa746a72f939fb51c552004941fd37 100644 (file)
@@ -83,6 +83,18 @@ struct sunxi_ccu_desc {
 
 void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock);
 
+struct ccu_pll_nb {
+       struct notifier_block   clk_nb;
+       struct ccu_common       *common;
+
+       u32     enable;
+       u32     lock;
+};
+
+#define to_ccu_pll_nb(_nb) container_of(_nb, struct ccu_pll_nb, clk_nb)
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb);
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
                    const struct sunxi_ccu_desc *desc);
 
index 94250c293be2a18b247e2be006a0e7e4faf4f6f8..c68ac65db7ffec361169c326ede5bbf263a00b1b 100644 (file)
@@ -2006,7 +2006,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
                return;
        case HID_DG_TOOLSERIALNUMBER:
                wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
-               wacom_wac->serial[0] |= value;
+               wacom_wac->serial[0] |= (__u32)value;
                return;
        case WACOM_HID_WD_SENSE:
                wacom_wac->hid_data.sense_state = value;
@@ -2176,6 +2176,16 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
                wacom_wac->hid_data.cc_index = field->index;
                wacom_wac->hid_data.cc_value_index = usage->usage_index;
                break;
+       case HID_DG_CONTACTID:
+               if ((field->logical_maximum - field->logical_minimum) < touch_max) {
+                       /*
+                        * The HID descriptor for G11 sensors leaves logical
+                        * maximum set to '1' despite it being a multitouch
+                        * device. Override to a sensible number.
+                        */
+                       field->logical_maximum = 255;
+               }
+               break;
        }
 }
 
index efc8ec3423514ad33dd2ebaf0c861d41bb960140..e73d968023f7ce7de418dcf1315b7c554773d604 100644 (file)
@@ -1118,6 +1118,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
@@ -1523,6 +1524,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
                },
        },
+       {
+               /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
+               },
+       },
        {
                /* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
                .matches = {
index e992a7f8a16fc3019016aa1f2844cfbfb437ad97..2b32b88949ba40dfb3901cf9588f670b7aad8441 100644 (file)
@@ -267,7 +267,7 @@ static void sdio_release_func(struct device *dev)
        sdio_free_func_cis(func);
 
        kfree(func->info);
-
+       kfree(func->tmpbuf);
        kfree(func);
 }
 
@@ -282,6 +282,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
        if (!func)
                return ERR_PTR(-ENOMEM);
 
+       /*
+        * allocate buffer separately to make sure it's properly aligned for
+        * DMA usage (incl. 64 bit DMA)
+        */
+       func->tmpbuf = kmalloc(4, GFP_KERNEL);
+       if (!func->tmpbuf) {
+               kfree(func);
+               return ERR_PTR(-ENOMEM);
+       }
+
        func->card = card;
 
        device_initialize(&func->dev);
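tmpbuf gets its own kmalloc() because a buffer embedded in a larger structure is only as aligned as its offset within that structure, while a standalone kmalloc() result is aligned to at least ARCH_KMALLOC_MINALIGN and is therefore safe to hand to a DMA engine. A hedged sketch of the general pattern; structure and function names are illustrative:

    #include <linux/slab.h>

    struct my_func {
            /* u8 tmpbuf[4]; -- embedded: alignment depends on layout,
             * not safe for DMA */
            u8 *tmpbuf;     /* separately allocated: DMA-safe */
    };

    static struct my_func *my_func_alloc(void)
    {
            struct my_func *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return NULL;

            f->tmpbuf = kmalloc(4, GFP_KERNEL);
            if (!f->tmpbuf) {
                    kfree(f);
                    return NULL;
            }
            return f;
    }
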
index a9ac0b4573131f48cad46044e018b5de479cf695..8718432751c50c6d5a955b104c250b151430d19c 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
@@ -1621,10 +1622,16 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
 
                if (card->type == MMC_TYPE_SDIO ||
                    card->type == MMC_TYPE_SD_COMBO) {
-                       set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+                               pm_runtime_get_noresume(mmc->parent);
+                               set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       }
                        clk_en_a = clk_en_a_old & ~clken_low_pwr;
                } else {
-                       clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+                               pm_runtime_put_noidle(mmc->parent);
+                               clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+                       }
                        clk_en_a = clk_en_a_old | clken_low_pwr;
                }
 
index 7123ef96ed18523c88553146035103f3517bd372..445fc47dc3e77e39b17e7531eb7dbed58ddd3279 100644 (file)
@@ -830,6 +830,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
 
        switch (uhs) {
        case MMC_TIMING_UHS_SDR50:
+       case MMC_TIMING_UHS_DDR50:
                pinctrl = imx_data->pins_100mhz;
                break;
        case MMC_TIMING_UHS_SDR104:
index 0134ba32a05784b65d1a0e6d470eee7a857df74c..39712560b4c1b55aa847f0ac69f6815edb11e678 100644 (file)
@@ -148,11 +148,11 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
                        return err;
        }
 
-       if (bytes == 0) {
-               err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
-               if (err)
-                       return err;
+       err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+       if (err)
+               return err;
 
+       if (bytes == 0) {
                err = clear_update_marker(ubi, vol, 0);
                if (err)
                        return err;
index 64a1095e4d1495c1e32c3ff7008882789f6b6f6e..a0ca68ce3fbb164ea6e6a2c75a36a8c1ae54172d 100644 (file)
@@ -134,6 +134,7 @@ static void set_max_bgx_per_node(struct pci_dev *pdev)
        pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
        switch (sdevid) {
        case PCI_SUBSYS_DEVID_81XX_BGX:
+       case PCI_SUBSYS_DEVID_81XX_RGX:
                max_bgx_per_node = MAX_BGX_PER_CN81XX;
                break;
        case PCI_SUBSYS_DEVID_83XX_BGX:
index c5080f2cead5d0efc435fd827038eb7dbe4b5830..6b7fe6fdd13b9b27b4a1573e8c7b3869b17bcc19 100644 (file)
@@ -16,6 +16,7 @@
 /* Subsystem device IDs */
 #define PCI_SUBSYS_DEVID_88XX_BGX              0xA126
 #define PCI_SUBSYS_DEVID_81XX_BGX              0xA226
+#define PCI_SUBSYS_DEVID_81XX_RGX              0xA254
 #define PCI_SUBSYS_DEVID_83XX_BGX              0xA326
 
 #define    MAX_BGX_THUNDER                     8 /* Max 2 nodes, 4 per node */
index 9e757684816d48b903f62cdac2d6a1123e6c3305..93949139e62cf92a7e593b36d6a7399edbd1b6e6 100644 (file)
@@ -613,7 +613,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        struct mtk_tx_dma *itxd, *txd;
-       struct mtk_tx_buf *tx_buf;
+       struct mtk_tx_buf *itx_buf, *tx_buf;
        dma_addr_t mapped_addr;
        unsigned int nr_frags;
        int i, n_desc = 1;
@@ -627,8 +627,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
        txd4 |= fport;
 
-       tx_buf = mtk_desc_to_tx_buf(ring, itxd);
-       memset(tx_buf, 0, sizeof(*tx_buf));
+       itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+       memset(itx_buf, 0, sizeof(*itx_buf));
 
        if (gso)
                txd4 |= TX_DMA_TSO;
@@ -647,9 +647,11 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                return -ENOMEM;
 
        WRITE_ONCE(itxd->txd1, mapped_addr);
-       tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-       dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
+       itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+       itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+                         MTK_TX_FLAGS_FPORT1;
+       dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
+       dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
 
        /* TX SG offload */
        txd = itxd;
@@ -685,11 +687,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                                               last_frag * TX_DMA_LS0));
                        WRITE_ONCE(txd->txd4, fport);
 
-                       tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf = mtk_desc_to_tx_buf(ring, txd);
                        memset(tx_buf, 0, sizeof(*tx_buf));
-
+                       tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+                       tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+                                        MTK_TX_FLAGS_FPORT1;
+
                        dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
                        dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
                        frag_size -= frag_map_size;
@@ -698,7 +702,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        }
 
        /* store skb to cleanup */
-       tx_buf->skb = skb;
+       itx_buf->skb = skb;
 
        WRITE_ONCE(itxd->txd4, txd4);
        WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
@@ -1012,17 +1016,16 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 
        while ((cpu != dma) && budget) {
                u32 next_cpu = desc->txd2;
-               int mac;
+               int mac = 0;
 
                desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
                if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
                        break;
 
-               mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
-                      TX_DMA_FPORT_MASK;
-               mac--;
-
                tx_buf = mtk_desc_to_tx_buf(ring, desc);
+               if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
+                       mac = 1;
+
                skb = tx_buf->skb;
                if (!skb) {
                        condition = 1;
index 99b1c8e9f16f981a0603f906280dcd98f7fa1b54..08285a96ff7077f83b5ac530e8f59266922819d1 100644 (file)
@@ -406,12 +406,18 @@ struct mtk_hw_stats {
        struct u64_stats_sync   syncp;
 };
 
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
 enum mtk_tx_flags {
+       /* PDMA descriptor can point at 1-2 segments. This enum allows us to
+        * track how memory was allocated so that it can be freed properly.
+        */
        MTK_TX_FLAGS_SINGLE0    = 0x01,
        MTK_TX_FLAGS_PAGE0      = 0x02,
+
+       /* MTK_TX_FLAGS_FPORTx allows tracking which port an SKB was
+        * transmitted out of, instead of looking it up through the
+        * hardware TX descriptor.
+        */
+       MTK_TX_FLAGS_FPORT0     = 0x04,
+       MTK_TX_FLAGS_FPORT1     = 0x08,
 };
 
 /* This enum allows us to identify how the clock is defined on the array of the
index 5bd36a4a8fcdfd201b40321c7fefb82776cd347d..a6e2bbe629bd1842c4f2a2563427de5e58f76f36 100644 (file)
@@ -583,6 +583,13 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
                   p_params->ets_cbs,
                   p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
 
+       if (p_params->ets_enabled && !p_params->max_ets_tc) {
+               p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
+               DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+                          "ETS params: max_ets_tc is forced to %d\n",
+               p_params->max_ets_tc);
+       }
+
        /* 8 bit tsa and bw data corresponding to each of the 8 TC's are
         * encoded in a type u32 array of size 2.
         */
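As a worked illustration of the encoding described in the comment above — a sketch assuming the eight per-TC bytes are packed least-significant-byte first into the two u32 words (the helper name is hypothetical):

/* Extract the 8-bit value for traffic class 'tc' (0..7) from u32[2]. */
static u8 qed_tc_byte(const u32 *arr, int tc)
{
	return (arr[tc / 4] >> ((tc % 4) * 8)) & 0xff;
}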
@@ -1001,6 +1008,8 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
        u8 pfc_map = 0;
        int i;
 
+       *pfc &= ~DCBX_PFC_ERROR_MASK;
+
        if (p_params->pfc.willing)
                *pfc |= DCBX_PFC_WILLING_MASK;
        else
@@ -1255,7 +1264,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 {
        struct qed_dcbx_get *dcbx_info;
 
-       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
        if (!dcbx_info)
                return NULL;
 
@@ -2073,6 +2082,8 @@ static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
                dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
 
+       dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap;
+
        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EINVAL;
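The kzalloc(GFP_KERNEL) to kmalloc(GFP_ATOMIC) switch above implies qed_dcbnl_get_dcbx() can be reached from a context that must not sleep; GFP_KERNEL may block, GFP_ATOMIC never does. A sketch of the rule, with the caveat that kmalloc() does not zero the buffer:

dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);	/* atomic-safe */
if (!dcbx_info)
	return NULL;
/* kmalloc() leaves the memory uninitialized; the subsequent query is
 * presumably expected to fill every field that is later read. */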
index 54248775f227b062addf85044f486ad4512039f5..f68c4db656eda84691b411cee940528a01a2bb62 100644 (file)
@@ -1127,12 +1127,70 @@ static struct mdiobb_ops bb_ops = {
        .get_mdio_data = sh_get_mdio,
 };
 
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       struct sh_eth_txdesc *txdesc;
+       int free_num = 0;
+       int entry;
+       bool sent;
+
+       for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+               entry = mdp->dirty_tx % mdp->num_tx_ring;
+               txdesc = &mdp->tx_ring[entry];
+               sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+               if (sent_only && !sent)
+                       break;
+               /* TACT bit must be checked before all the following reads */
+               dma_rmb();
+               netif_info(mdp, tx_done, ndev,
+                          "tx entry %d status 0x%08x\n",
+                          entry, le32_to_cpu(txdesc->status));
+               /* Free the original skb. */
+               if (mdp->tx_skbuff[entry]) {
+                       dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+                                        le32_to_cpu(txdesc->len) >> 16,
+                                        DMA_TO_DEVICE);
+                       dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+                       mdp->tx_skbuff[entry] = NULL;
+                       free_num++;
+               }
+               txdesc->status = cpu_to_le32(TD_TFP);
+               if (entry >= mdp->num_tx_ring - 1)
+                       txdesc->status |= cpu_to_le32(TD_TDLE);
+
+               if (sent) {
+                       ndev->stats.tx_packets++;
+                       ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+               }
+       }
+       return free_num;
+}
+
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ringsize, i;
 
+       if (mdp->rx_ring) {
+               for (i = 0; i < mdp->num_rx_ring; i++) {
+                       if (mdp->rx_skbuff[i]) {
+                               struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+                               dma_unmap_single(&ndev->dev,
+                                                le32_to_cpu(rxdesc->addr),
+                                                ALIGN(mdp->rx_buf_sz, 32),
+                                                DMA_FROM_DEVICE);
+                       }
+               }
+               ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+               dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+                                 mdp->rx_desc_dma);
+               mdp->rx_ring = NULL;
+       }
+
        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1141,27 +1199,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;
 
-       /* Free Tx skb ringbuffer */
-       if (mdp->tx_skbuff) {
-               for (i = 0; i < mdp->num_tx_ring; i++)
-                       dev_kfree_skb(mdp->tx_skbuff[i]);
-       }
-       kfree(mdp->tx_skbuff);
-       mdp->tx_skbuff = NULL;
-
-       if (mdp->rx_ring) {
-               ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-               dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-                                 mdp->rx_desc_dma);
-               mdp->rx_ring = NULL;
-       }
-
        if (mdp->tx_ring) {
+               sh_eth_tx_free(ndev, false);
+
                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
                dma_free_coherent(NULL, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }
+
+       /* Free Tx skb ringbuffer */
+       kfree(mdp->tx_skbuff);
+       mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
@@ -1409,43 +1458,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
        update_mac_address(ndev);
 }
 
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       struct sh_eth_txdesc *txdesc;
-       int free_num = 0;
-       int entry;
-
-       for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-               entry = mdp->dirty_tx % mdp->num_tx_ring;
-               txdesc = &mdp->tx_ring[entry];
-               if (txdesc->status & cpu_to_le32(TD_TACT))
-                       break;
-               /* TACT bit must be checked before all the following reads */
-               dma_rmb();
-               netif_info(mdp, tx_done, ndev,
-                          "tx entry %d status 0x%08x\n",
-                          entry, le32_to_cpu(txdesc->status));
-               /* Free the original skb. */
-               if (mdp->tx_skbuff[entry]) {
-                       dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
-                                        le32_to_cpu(txdesc->len) >> 16,
-                                        DMA_TO_DEVICE);
-                       dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
-                       mdp->tx_skbuff[entry] = NULL;
-                       free_num++;
-               }
-               txdesc->status = cpu_to_le32(TD_TFP);
-               if (entry >= mdp->num_tx_ring - 1)
-                       txdesc->status |= cpu_to_le32(TD_TDLE);
-
-               ndev->stats.tx_packets++;
-               ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
-       }
-       return free_num;
-}
-
 /* Packet receive function */
 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
@@ -1690,7 +1702,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
                           intr_status, mdp->cur_tx, mdp->dirty_tx,
                           (u32)ndev->state, edtrr);
                /* dirty buffer free */
-               sh_eth_txfree(ndev);
+               sh_eth_tx_free(ndev, true);
 
                /* SH7712 BUG */
                if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1751,7 +1763,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                /* Clear Tx interrupts */
                sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
 
-               sh_eth_txfree(ndev);
+               sh_eth_tx_free(ndev, true);
                netif_wake_queue(ndev);
        }
 
@@ -2412,7 +2424,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
-               if (!sh_eth_txfree(ndev)) {
+               if (!sh_eth_tx_free(ndev, true)) {
                        netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
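For reference, the two call modes of the new sh_eth_tx_free(), as used in the hunks above:

/* Completion paths (interrupt handler, error path, start_xmit
 * backpressure): reap only descriptors the hardware has released,
 * i.e. those with TD_TACT cleared. */
sh_eth_tx_free(ndev, true);

/* Teardown path (sh_eth_ring_free): drain every queued skb; statistics
 * are bumped only for descriptors that were actually sent. */
sh_eth_tx_free(ndev, false);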
index 50d28261b6b9ea22f42c26be0e9f0e0bed194109..b9cb697b281847a83aa511c577a6c790516f8012 100644 (file)
@@ -1371,6 +1371,13 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
                free_cpumask_var(thread_mask);
        }
 
+       if (count > EFX_MAX_RX_QUEUES) {
+               netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+                              "Reducing number of rx queues from %u to %u.\n",
+                              count, EFX_MAX_RX_QUEUES);
+               count = EFX_MAX_RX_QUEUES;
+       }
+
        /* If RSS is requested for the PF *and* VFs then we can't write RSS
         * table entries that are inaccessible to VFs
         */
index f5e5cd1659a148fb63ce2078ef13a5ae12d048bc..29614da91cbf919f91841d8e644ab4b246741ec7 100644 (file)
@@ -1354,6 +1354,13 @@ static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
                free_cpumask_var(thread_mask);
        }
 
+       if (count > EF4_MAX_RX_QUEUES) {
+               netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+                              "Reducing number of rx queues from %u to %u.\n",
+                              count, EF4_MAX_RX_QUEUES);
+               count = EF4_MAX_RX_QUEUES;
+       }
+
        return count;
 }
 
index e2460a57e4b1105ed398e207aa8cdfd84d03707d..ed0d10f54f2607533868dfd10e6bc9d0e09050de 100644 (file)
@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
                skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
                skb_queue_tail(&dp83640->rx_queue, skb);
                schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
-       } else {
-               netif_rx_ni(skb);
        }
 
        return true;
index 6742070ca676f57694a9a6cb11364941deb520a0..1326d99771c115cfd3868c1834e850bf9b1459ef 100644 (file)
@@ -798,9 +798,6 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -940,9 +937,6 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -952,6 +946,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .driver_data    = &ksz9021_type,
+       .probe          = kszphy_probe,
        .config_init    = ksz9021_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -971,6 +966,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .driver_data    = &ksz9021_type,
+       .probe          = kszphy_probe,
        .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = ksz9031_read_status,
@@ -989,9 +985,6 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -1003,9 +996,6 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -1017,9 +1007,6 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
-       .get_sset_count = kszphy_get_sset_count,
-       .get_strings    = kszphy_get_strings,
-       .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 } };
index 8a40202c0a1732850da1b2eb64af21470b9c85b4..c4f1c363e24b89404c6834312074f8a4451ded50 100644 (file)
@@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        tx_overhead = 0x40;
 
        len = skb->len;
-       if (skb_headroom(skb) < tx_overhead) {
-               struct sk_buff *skb2;
-
-               skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
+       if (skb_cow_head(skb, tx_overhead)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        __skb_push(skb, tx_overhead);
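The same conversion recurs in the USB network drivers below. A generic sketch of the pattern ('overhead' stands in for each driver's header size):

/* skb_cow_head() both guarantees 'overhead' bytes of writable headroom
 * and un-shares a cloned header in one call. On failure the skb must be
 * freed here: returning NULL tells the usbnet core the skb was already
 * consumed, so it will not be freed by the caller. */
if (skb_cow_head(skb, overhead)) {
	dev_kfree_skb_any(skb);
	return NULL;
}
__skb_push(skb, overhead);	/* now safe to prepend the device header */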
index e221bfcee76b40a3ad7ba60ec4d348f4b8f4cc73..947bea81d924124c3827e87f75e732e35adb2acd 100644 (file)
@@ -293,12 +293,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 {
        int len = skb->len;
 
-       if (skb_headroom(skb) < 2) {
-               struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+       if (skb_cow_head(skb, 2)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
        skb_push(skb, 2);
 
index 876f02f4945eafdc2fb5cfa0f9dcb54d9b498af4..2a2c3edb6bad0b3bd257c3a101d100ad3b00cc59 100644 (file)
@@ -803,18 +803,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
        }
 
        /* We now decide whether we can put our special header into the sk_buff */
-       if (skb_cloned(skb) || skb_headroom(skb) < 2) {
-               /* no such luck - we make our own */
-               struct sk_buff *copied_skb;
-               copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
-               dev_kfree_skb_irq(skb);
-               skb = copied_skb;
-               if (!copied_skb) {
-                       kaweth->stats.tx_errors++;
-                       netif_start_queue(net);
-                       spin_unlock_irq(&kaweth->device_lock);
-                       return NETDEV_TX_OK;
-               }
+       if (skb_cow_head(skb, 2)) {
+               kaweth->stats.tx_errors++;
+               netif_start_queue(net);
+               spin_unlock_irq(&kaweth->device_lock);
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
        }
 
        private_header = (__le16 *)__skb_push(skb, 2);
index 9889a70ff4f6fece5bfabbfb45a3470f721a5a32..636f48f19d1eacae67c050de4fc3e651bffdf825 100644 (file)
@@ -2607,14 +2607,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
 {
        u32 tx_cmd_a, tx_cmd_b;
 
-       if (skb_headroom(skb) < TX_OVERHEAD) {
-               struct sk_buff *skb2;
-
-               skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+       if (skb_cow_head(skb, TX_OVERHEAD)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        if (lan78xx_linearize(skb) < 0)
index 0b17b40d7a4fa2653caf21406c4a6b3b45d868b0..190de9a90f7387c5070c7f589aa18bb7d05ac5d7 100644 (file)
@@ -2203,13 +2203,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
 {
        u32 tx_cmd_a, tx_cmd_b;
 
-       if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
-               struct sk_buff *skb2 =
-                       skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+       if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
index 831aa33d078ae7d2dd57fdded5de71d1eb915f99..5f19fb0f025d9449d0ba20958610e0d1f083f032 100644 (file)
@@ -2001,13 +2001,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
        /* We do not advertise SG, so skbs should already be linearized */
        BUG_ON(skb_shinfo(skb)->nr_frags);
 
-       if (skb_headroom(skb) < overhead) {
-               struct sk_buff *skb2 = skb_copy_expand(skb,
-                       overhead, 0, flags);
+       /* Make writable and expand header space by overhead if required */
+       if (skb_cow_head(skb, overhead)) {
+               /* Must deallocate here as returning NULL to indicate error
+                * means the skb won't be deallocated in the caller.
+                */
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        if (csum) {
index 4a1e9c489f1f455388ffee289d65e1d6b36cba42..aadfe1d1c37ee67e2a7d17c759db12dad248c41d 100644 (file)
@@ -456,14 +456,9 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 
        len = skb->len;
 
-       if (skb_headroom(skb) < SR_TX_OVERHEAD) {
-               struct sk_buff *skb2;
-
-               skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
+       if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        __skb_push(skb, SR_TX_OVERHEAD);
index d6988db1930d6b38db5f932a81c6c64b9c76776a..7d909c8183e95a62b6f8a3182d3ce645a264e909 100644 (file)
@@ -1128,7 +1128,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
                goto nla_put_failure;
 
        /* rule only needs to appear once */
-       nlh->nlmsg_flags &= NLM_F_EXCL;
+       nlh->nlmsg_flags |= NLM_F_EXCL;
 
        frh = nlmsg_data(nlh);
        memset(frh, 0, sizeof(*frh));
index 9583a5f58a1ddc498b89d1b6cbaa14cf1a60bea2..eeb409c287b8ed304bdafea7804619dfb39d15b5 100644 (file)
@@ -1315,6 +1315,14 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                        if (target)
                                table->entries[state] = target;
 
+                       /*
+                        * Don't allow transitions to the deepest state
+                        * if it's quirked off.
+                        */
+                       if (state == ctrl->npss &&
+                           (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
+                               continue;
+
                        /*
                         * Is this state a useful non-operational state for
                         * higher-power states to autonomously transition to?
@@ -1387,16 +1395,15 @@ struct nvme_core_quirk_entry {
 };
 
 static const struct nvme_core_quirk_entry core_quirks[] = {
-       /*
-        * Seen on a Samsung "SM951 NVMe SAMSUNG 256GB": using APST causes
-        * the controller to go out to lunch.  It dies when the watchdog
-        * timer reads CSTS and gets 0xffffffff.
-        */
        {
-               .vid = 0x144d,
-               .fr = "BXW75D0Q",
+               /*
+                * This Toshiba device seems to die using any APST states.  See:
+                * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
+                */
+               .vid = 0x1179,
+               .mn = "THNSF5256GPUK TOSHIBA",
                .quirks = NVME_QUIRK_NO_APST,
-       },
+       }
 };
 
 /* match is null-terminated but idstr is space-padded. */
index 2aa20e3e5675bf14a8aaf8784bafc344b970a052..ab2d6ec7eb5cc32292a3a9aec3a536fb21f29c4c 100644 (file)
@@ -83,6 +83,11 @@ enum nvme_quirks {
         * APST should not be used.
         */
        NVME_QUIRK_NO_APST                      = (1 << 4),
+
+       /*
+        * The deepest sleep state should not be used.
+        */
+       NVME_QUIRK_NO_DEEPEST_PS                = (1 << 5),
 };
 
 /*
index 26a5fd05fe88aa003a00dc4ece6e9900bd95e618..5d309535abbd6fbef366cc61b254dceac2719618 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/blk-mq-pci.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
@@ -1943,10 +1944,31 @@ static int nvme_dev_map(struct nvme_dev *dev)
        return -ENODEV;
 }
 
+static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+{
+       if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
+               /*
+                * Several Samsung devices seem to drop off the PCIe bus
+                * randomly when APST is on and uses the deepest sleep state.
+                * This has been observed on a Samsung "SM951 NVMe SAMSUNG
+                * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
+                * 950 PRO 256GB", but it seems to be restricted to two Dell
+                * laptops.
+                */
+               if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
+                   (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
+                    dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
+                       return NVME_QUIRK_NO_DEEPEST_PS;
+       }
+
+       return 0;
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int node, result = -ENOMEM;
        struct nvme_dev *dev;
+       unsigned long quirks = id->driver_data;
 
        node = dev_to_node(&pdev->dev);
        if (node == NUMA_NO_NODE)
@@ -1978,8 +2000,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto put_pci;
 
+       quirks |= check_dell_samsung_bug(pdev);
+
        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-                       id->driver_data);
+                       quirks);
        if (result)
                goto release_pools;
 
index fd66a3199db77d41d08e0658d89611370d46446f..cf9d6a9d9fd4fc17afd19d0e659f5e7cff0a3ca8 100644 (file)
@@ -380,9 +380,13 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
 
 static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
        {
-               .compatible = "hisilicon,pcie-almost-ecam",
+               .compatible =  "hisilicon,hip06-pcie-ecam",
                .data       = (void *) &hisi_pcie_platform_ops,
        },
+       {
+               .compatible =  "hisilicon,hip07-pcie-ecam",
+               .data       = (void *) &hisi_pcie_platform_ops,
+       },
        {},
 };
 
index d7efcb632f7d9dde08b0a494c455725b05d55af0..002f1ce22bd02032062924b19d645ebc25302c93 100644 (file)
@@ -297,14 +297,15 @@ static int pwm_backlight_probe(struct platform_device *pdev)
        }
 
        /*
-        * If the GPIO is configured as input, change the direction to output
-        * and set the GPIO as active.
+        * If the GPIO is not known to be already configured as output, that
+        * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL,
+        * change the direction to output and set the GPIO as active.
         * Do not force the GPIO to active when it was already output as it
         * could cause backlight flickering or we would enable the backlight too
         * early. Leave the decision of the initial backlight state for later.
         */
        if (pb->enable_gpio &&
-           gpiod_get_direction(pb->enable_gpio) == GPIOF_DIR_IN)
+           gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT)
                gpiod_direction_output(pb->enable_gpio, 1);
 
        pb->power_supply = devm_regulator_get(&pdev->dev, "power");
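A behavior table for the corrected check, assuming gpiod_get_direction() returns GPIOF_DIR_IN, GPIOF_DIR_OUT, or a negative errno such as -EINVAL when the direction cannot be determined:

/* gpiod_get_direction()   old test (== DIR_IN)   new test (!= DIR_OUT)
 * GPIOF_DIR_IN            reconfigure            reconfigure
 * GPIOF_DIR_OUT           leave as-is            leave as-is
 * -EINVAL (unknown)       leave as-is (bug)      reconfigure
 */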
index cc93ba4da9b592468f36d37b80777e88b8e400e6..27bc360c7ffd7e1081f907c5f080dc4ba439fbfc 100644 (file)
@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
        return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
 }
 
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+       if (server->tcpStatus == CifsGood)
+               return true;
+
+       return false;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = {
        .get_dfs_refer = CIFSGetDFSRefer,
        .qfs_tcon = cifs_qfs_tcon,
        .is_path_accessible = cifs_is_path_accessible,
+       .can_echo = cifs_can_echo,
        .query_path_info = cifs_query_path_info,
        .query_file_info = cifs_query_file_info,
        .get_srv_inum = cifs_get_srv_inum,
index cbeeda1e94a2fbbba61e2adeeb4f9ba89287eaf9..d86031b6ad79301c8ca0ceec9c8b991dd5db70f1 100644 (file)
@@ -2489,7 +2489,7 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
 
 int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
-       if (op->opnum == OP_ILLEGAL)
+       if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp)
                return op_encode_hdr_size * sizeof(__be32);
 
        BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
index 1656843e87d2bef47b69d65b23c23946e8936f33..323f492e0822dd3286365d5cdbe56c59ec2d5463 100644 (file)
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -91,6 +91,7 @@ slow:
                return ERR_PTR(-ENOMEM);
        }
        d_instantiate(dentry, inode);
+       dentry->d_flags |= DCACHE_RCUACCESS;
        dentry->d_fsdata = (void *)ns->ops;
        d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
        if (d) {
index 1e712a36468064a75910e8ba9c0e1cfb2bd3ab28..718b749fa11aa8901544a1e6925a7486bbb357f5 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/math64.h>
 #include <linux/uaccess.h>
 #include <linux/random.h>
+#include <linux/ctype.h>
 #include "ubifs.h"
 
 static DEFINE_SPINLOCK(dbg_lock);
@@ -286,8 +287,10 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
                        break;
                }
 
-               pr_err("\t%d: %s (%s)\n",
-                      count++, dent->name, get_dent_type(dent->type));
+               pr_err("\t%d: inode %llu, type %s, len %d\n",
+                      count++, (unsigned long long) le64_to_cpu(dent->inum),
+                      get_dent_type(dent->type),
+                      le16_to_cpu(dent->nlen));
 
                fname_name(&nm) = dent->name;
                fname_len(&nm) = le16_to_cpu(dent->nlen);
@@ -464,7 +467,8 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
                        pr_err("(bad name length, not printing, bad or corrupted node)");
                else {
                        for (i = 0; i < nlen && dent->name[i]; i++)
-                               pr_cont("%c", dent->name[i]);
+                               pr_cont("%c", isprint(dent->name[i]) ?
+                                       dent->name[i] : '?');
                }
                pr_cont("\n");
 
index 30825d882aa94a4c2486d47581f33d1dfca1a409..b777bddaa1dda9f2768952dc562e06df6b068d32 100644 (file)
@@ -606,8 +606,8 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
        }
 
        while (1) {
-               dbg_gen("feed '%s', ino %llu, new f_pos %#x",
-                       dent->name, (unsigned long long)le64_to_cpu(dent->inum),
+               dbg_gen("ino %llu, new f_pos %#x",
+                       (unsigned long long)le64_to_cpu(dent->inum),
                        key_hash_flash(c, &dent->key));
                ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
                             ubifs_inode(dir)->creat_sqnum);
@@ -748,6 +748,11 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
                goto out_fname;
 
        lock_2_inodes(dir, inode);
+
+       /* Handle the O_TMPFILE corner case: it is allowed to link an O_TMPFILE. */
+       if (inode->i_nlink == 0)
+               ubifs_delete_orphan(c, inode->i_ino);
+
        inc_nlink(inode);
        ihold(inode);
        inode->i_ctime = ubifs_current_time(inode);
@@ -768,6 +773,8 @@ out_cancel:
        dir->i_size -= sz_change;
        dir_ui->ui_size = dir->i_size;
        drop_nlink(inode);
+       if (inode->i_nlink == 0)
+               ubifs_add_orphan(c, inode->i_ino);
        unlock_2_inodes(dir, inode);
        ubifs_release_budget(c, &req);
        iput(inode);
@@ -1068,8 +1075,10 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
        }
 
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
-       if (err)
+       if (err) {
+               kfree(dev);
                goto out_budg;
+       }
 
        sz_change = CALC_DENT_SIZE(fname_len(&nm));
 
@@ -1316,9 +1325,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        unsigned int uninitialized_var(saved_nlink);
        struct fscrypt_name old_nm, new_nm;
 
-       if (flags & ~RENAME_NOREPLACE)
-               return -EINVAL;
-
        /*
         * Budget request settings: deletion direntry, new direntry, removing
         * the old inode, and changing old and new parent directory inodes.
index 1d4f365d8f03a439ab0e20caf27ae34ad6e13e74..f6d9af3efa45a6cc8eb448a71f5d43d72d8fcbd1 100644 (file)
@@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
        return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
 }
 
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+       req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+       return req->base.flags;
+}
+
 static inline struct crypto_ahash *crypto_spawn_ahash(
        struct crypto_ahash_spawn *spawn)
 {
index aab032a6ae6124de63b7934f49d0dc564b0d2dc3..97ca105347a6c5e608297ae1a3562925dc6f6834 100644 (file)
@@ -53,7 +53,7 @@ struct sdio_func {
        unsigned int            state;          /* function state */
 #define SDIO_STATE_PRESENT     (1<<0)          /* present in sysfs */
 
-       u8                      tmpbuf[4];      /* DMA:able scratch buffer */
+       u8                      *tmpbuf;        /* DMA:able scratch buffer */
 
        unsigned                num_info;       /* number of info strings */
        const char              **info;         /* info strings */
index 85bbb1799df3f93cb1eacbfec497278c2c20c098..d496c02e14bc44327fd3ab6c3faad0c1d7ac1e12 100644 (file)
@@ -35,7 +35,7 @@
 #define RTF_PREF(pref) ((pref) << 27)
 #define RTF_PREF_MASK  0x18000000
 
-#define RTF_PCPU       0x40000000
+#define RTF_PCPU       0x40000000      /* read-only: cannot be set by user */
 #define RTF_LOCAL      0x80000000
 
 
index 7af0dcc5d7555679cea6c08395ab54710e7066e6..821f9e807de5705d5b4d65e502fb13a06d3215bb 100644 (file)
@@ -617,6 +617,14 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
                        if (insn->imm == BPF_FUNC_xdp_adjust_head)
                                prog->xdp_adjust_head = 1;
                        if (insn->imm == BPF_FUNC_tail_call) {
+                               /* If we tail call into other programs, we
+                                * cannot make any assumptions since they
+                                * can be replaced dynamically during runtime
+                                * in the program array.
+                                */
+                               prog->cb_access = 1;
+                               prog->xdp_adjust_head = 1;
+
                                /* mark bpf_tail_call as different opcode
                                 * to avoid conditional branch in
                                 * interpreter for every normal call
index d052947fe78507ac80bbe58a09cc612bca7bab73..e2d356dd75812df8e42dfac3feb132edd5612e33 100644 (file)
@@ -98,7 +98,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                int ncpus, v, vecs_to_assign, vecs_per_node;
 
                /* Spread the vectors per node */
-               vecs_per_node = (affv - curvec) / nodes;
+               vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
                /* Get the cpus on this node which are in the mask */
                cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
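A worked example for the corrected formula, with hypothetical numbers and assuming affv counts only the spreadable vectors while curvec starts at affd->pre_vectors:

/* affv = 8 spreadable vectors, pre_vectors = 2, nodes = 2.
 * First node: curvec == pre_vectors == 2, so
 *   new: vecs_per_node = (8 - (2 - 2)) / 2 = 4
 *   old: vecs_per_node = (8 - 2) / 2     = 3
 * The old form under-assigned vectors whenever pre_vectors != 0. */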
index c2b88490d857583026a35090b62f7891446b7ba2..c08fbd2f5ba9fa2a806f326a3a85f5d021d74027 100644 (file)
@@ -46,13 +46,13 @@ enum {
                (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
 
 /*
- * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
  * .data and .bss to fit in required 32MB limit for the kernel. With
- * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
  * So, reduce the static allocations for lockdeps related structures so that
  * everything fits in current required size limit.
  */
-#ifdef CONFIG_PROVE_LOCKING_SMALL
+#ifdef CONFIG_LOCKDEP_SMALL
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
index 27bb2e61276e1c6b3a581ce0f3a1f750d5975227..dd3e91d68dc73053e7ba0ca5bed0681b374ea8fc 100644 (file)
@@ -5566,6 +5566,15 @@ static void clear_ftrace_pids(struct trace_array *tr)
        trace_free_pid_list(pid_list);
 }
 
+void ftrace_clear_pids(struct trace_array *tr)
+{
+       mutex_lock(&ftrace_lock);
+
+       clear_ftrace_pids(tr);
+
+       mutex_unlock(&ftrace_lock);
+}
+
 static void ftrace_pid_reset(struct trace_array *tr)
 {
        mutex_lock(&ftrace_lock);
index 54e7a90db848df3d1bca17e1ca966e57d2ce57d2..ca47a4fa2986c953dffa0b74fc791647bf0409fb 100644 (file)
@@ -3405,11 +3405,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
+       struct buffer_page *reader;
+       struct buffer_page *head_page;
+       struct buffer_page *commit_page;
+       unsigned commit;
 
        cpu_buffer = iter->cpu_buffer;
 
-       return iter->head_page == cpu_buffer->commit_page &&
-               iter->head == rb_commit_index(cpu_buffer);
+       /* Remember, trace recording is off when iterator is in use */
+       reader = cpu_buffer->reader_page;
+       head_page = cpu_buffer->head_page;
+       commit_page = cpu_buffer->commit_page;
+       commit = rb_page_commit(commit_page);
+
+       return ((iter->head_page == commit_page && iter->head == commit) ||
+               (iter->head_page == reader && commit_page == head_page &&
+                head_page->read == commit &&
+                iter->head == rb_page_commit(cpu_buffer->reader_page)));
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
index f35109514a015c38de8b2e1da99399fd5f399692..0ad75e9698f6b0f918df83af90da1204e99d8308 100644 (file)
@@ -6733,11 +6733,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
                return ret;
 
  out_reg:
-       ret = register_ftrace_function_probe(glob, ops, count);
+       ret = alloc_snapshot(&global_trace);
+       if (ret < 0)
+               goto out;
 
-       if (ret >= 0)
-               alloc_snapshot(&global_trace);
+       ret = register_ftrace_function_probe(glob, ops, count);
 
+ out:
        return ret < 0 ? ret : 0;
 }
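The reordering above is about lifetime, not style; as a sketch of the reasoning:

/* Allocate the snapshot buffer before registering the probe: with the
 * old order, a probe could presumably fire in the window between
 * registration and allocation and act on a buffer that does not yet
 * exist. */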
 
@@ -7402,6 +7404,7 @@ static int instance_rmdir(const char *name)
 
        tracing_set_nop(tr);
        event_trace_del_tracer(tr);
+       ftrace_clear_pids(tr);
        ftrace_destroy_function_files(tr);
        tracefs_remove_recursive(tr->dir);
        free_trace_buffers(tr);
index ae1cce91fead25a065899109e426a6cc1e597d28..d19d52d600d623e9d9f0676891e19c6e5e880bce 100644 (file)
@@ -896,6 +896,7 @@ int using_ftrace_ops_list_func(void);
 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
                                  struct dentry *d_tracer);
+void ftrace_clear_pids(struct trace_array *tr);
 #else
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
@@ -914,6 +915,7 @@ ftrace_init_global_array_ops(struct trace_array *tr) { }
 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
+static inline void ftrace_clear_pids(struct trace_array *tr) { }
 /* ftace_func_t type is not defined, use macro instead of static inline */
 #define ftrace_init_array_ops(tr, func) do { } while (0)
 #endif /* CONFIG_FUNCTION_TRACER */
index 97d62c2da6c25dd5721f8c1c75264c83201f7247..fa16c0f82d6e4c159ac4b8751a1fc52631974438 100644 (file)
@@ -1103,9 +1103,6 @@ config PROVE_LOCKING
 
         For more details, see Documentation/locking/lockdep-design.txt.
 
-config PROVE_LOCKING_SMALL
-       bool
-
 config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1111,9 @@ config LOCKDEP
        select KALLSYMS
        select KALLSYMS_ALL
 
+config LOCKDEP_SMALL
+       bool
+
 config LOCK_STAT
        bool "Lock usage statistics"
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
index ed97c2c14fa80b47ffbf7fa22ec6d4b9b57202b1..738f1d5f83503e546960d005a034abf2dde2c0e7 100644 (file)
@@ -184,9 +184,9 @@ void putback_movable_pages(struct list_head *l)
                        unlock_page(page);
                        put_page(page);
                } else {
-                       putback_lru_page(page);
                        dec_node_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_cache(page));
+                       putback_lru_page(page);
                }
        }
 }
index f3d603cef2c0c0e5aef09540dd2f8d50da5a808c..07efbc3a86567676986105005f77c64f9f99597a 100644 (file)
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
-       unsigned long nr_scanned, flags;
+       unsigned long nr_scanned;
        bool isolated_pageblocks;
 
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        isolated_pageblocks = has_isolate_pageblock(zone);
        nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
        if (nr_scanned)
@@ -1142,7 +1142,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        trace_mm_page_pcpu_drain(page, 0, mt);
                } while (--count && --batch_free && !list_empty(list));
        }
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -1150,9 +1150,8 @@ static void free_one_page(struct zone *zone,
                                unsigned int order,
                                int migratetype)
 {
-       unsigned long nr_scanned, flags;
-       spin_lock_irqsave(&zone->lock, flags);
-       __count_vm_events(PGFREE, 1 << order);
+       unsigned long nr_scanned;
+       spin_lock(&zone->lock);
        nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
        if (nr_scanned)
                __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
                migratetype = get_pfnblock_migratetype(page, pfn);
        }
        __free_one_page(page, pfn, zone, order, migratetype);
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,6 +1239,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
+       unsigned long flags;
        int migratetype;
        unsigned long pfn = page_to_pfn(page);
 
@@ -1247,7 +1247,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
                return;
 
        migratetype = get_pfnblock_migratetype(page, pfn);
+       local_irq_save(flags);
+       __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, pfn, order, migratetype);
+       local_irq_restore(flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        int migratetype, bool cold)
 {
        int i, alloced = 0;
-       unsigned long flags;
 
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
         * pages added to the pcp list.
         */
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
        return alloced;
 }
 
@@ -2485,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
 {
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
+       unsigned long flags;
        unsigned long pfn = page_to_pfn(page);
        int migratetype;
 
-       if (in_interrupt()) {
-               __free_pages_ok(page, 0);
-               return;
-       }
-
        if (!free_pcp_prepare(page))
                return;
 
        migratetype = get_pfnblock_migratetype(page, pfn);
        set_pcppage_migratetype(page, migratetype);
-       preempt_disable();
+       local_irq_save(flags);
+       __count_vm_event(PGFREE);
 
        /*
         * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2515,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
                migratetype = MIGRATE_MOVABLE;
        }
 
-       __count_vm_event(PGFREE);
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        if (!cold)
                list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2529,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
        }
 
 out:
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 /*
@@ -2654,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 {
        struct page *page;
 
-       VM_BUG_ON(in_interrupt());
-
        do {
                if (list_empty(list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
@@ -2686,8 +2682,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
        struct list_head *list;
        bool cold = ((gfp_flags & __GFP_COLD) != 0);
        struct page *page;
+       unsigned long flags;
 
-       preempt_disable();
+       local_irq_save(flags);
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        list = &pcp->lists[migratetype];
        page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list);
@@ -2695,7 +2692,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
                __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
                zone_statistics(preferred_zone, zone);
        }
-       preempt_enable();
+       local_irq_restore(flags);
        return page;
 }
 
@@ -2711,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
        unsigned long flags;
        struct page *page;
 
-       if (likely(order == 0) && !in_interrupt()) {
+       if (likely(order == 0)) {
                page = rmqueue_pcplist(preferred_zone, zone, order,
                                gfp_flags, migratetype);
                goto out;
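The common thread in the page-allocator hunks above: per-cpu pagelist sections move from preempt_disable() back to local_irq_save(), and the in_interrupt() special cases are dropped. The resulting fast-path shape, as a sketch:

local_irq_save(flags);		/* IRQ-safe, not merely preempt-safe */
pcp = &this_cpu_ptr(zone->pageset)->pcp;
/* ... add to or take from pcp->lists[migratetype] ... */
local_irq_restore(flags);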
index 809025ed97ea0eee97573a32ba2764c63ee2dffd..5a4f5c5a31e88ee558f536d22f61f05a3fd13c45 100644 (file)
@@ -1768,8 +1768,7 @@ void __init init_mm_internals(void)
 {
        int ret __maybe_unused;
 
-       mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
-                                      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+       mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
 
 #ifdef CONFIG_SMP
        ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
index 9424673009c14e0fb288b8e4041dba596b37ee8d..29be2466970cd670daa7a8abdd54929c9af39026 100644 (file)
@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                struct netdev_queue *txq;
+               unsigned int q_index;
 
                if (!netif_device_present(dev) || !netif_running(dev)) {
                        kfree_skb(skb);
                        continue;
                }
 
-               txq = skb_get_tx_queue(dev, skb);
-
                local_irq_save(flags);
+               /* check if skb->queue_mapping is still valid */
+               q_index = skb_get_queue_mapping(skb);
+               if (unlikely(q_index >= dev->real_num_tx_queues)) {
+                       q_index = q_index % dev->real_num_tx_queues;
+                       skb_set_queue_mapping(skb, q_index);
+               }
+               txq = netdev_get_tx_queue(dev, q_index);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
                    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
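A worked example for the remap above:

/* If the device was reconfigured from 8 to 4 TX queues while skbs sat
 * on npinfo->txq, a stored queue_mapping of 6 now exceeds
 * real_num_tx_queues, so it is folded to 6 % 4 = 2 before the
 * netdev_queue lookup. */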
index 9f781092fda9cb8cac22b0743b4bc7666a3bd91a..f86bf69cfb8d8bc17262cdba5d9f57a4726cd476 100644 (file)
@@ -3082,22 +3082,32 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        if (sg && csum && (mss != GSO_BY_FRAGS))  {
                if (!(features & NETIF_F_GSO_PARTIAL)) {
                        struct sk_buff *iter;
+                       unsigned int frag_len;
 
                        if (!list_skb ||
                            !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
                                goto normal;
 
-                       /* Split the buffer at the frag_list pointer.
-                        * This is based on the assumption that all
-                        * buffers in the chain excluding the last
-                        * containing the same amount of data.
+                       /* If we get here then all the required
+                        * GSO features except frag_list are supported.
+                        * Try to split the SKB into multiple GSO SKBs
+                        * with no frag_list.
+                        * Currently we can do that only when the buffers don't
+                        * have a linear part and all the buffers except
+                        * the last are of the same length.
                         */
+                       frag_len = list_skb->len;
                        skb_walk_frags(head_skb, iter) {
+                               if (frag_len != iter->len && iter->next)
+                                       goto normal;
                                if (skb_headlen(iter))
                                        goto normal;
 
                                len -= iter->len;
                        }
+
+                       if (len != frag_len)
+                               goto normal;
                }
 
                /* GSO partial only requires that we trim off any excess that
@@ -3807,6 +3817,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
        serr->ee.ee_info = tstype;
        serr->opt_stats = opt_stats;
+       serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
        if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
                serr->ee.ee_data = skb_shinfo(skb)->tskey;
                if (sk->sk_protocol == IPPROTO_TCP &&
index ebd953bc5607f3b25fffddcb26a5c65e5490cb2b..1d46d05efb0ff067c35750ac43be8e7babd60446 100644 (file)
@@ -488,16 +488,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
                return false;
 
        /* Support IP_PKTINFO on tstamp packets if requested, to correlate
-        * timestamp with egress dev. Not possible for packets without dev
+        * timestamp with egress dev. Not possible for packets without iif
         * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
         */
-       if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
-           (!skb->dev))
+       info = PKTINFO_SKB_CB(skb);
+       if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
+           !info->ipi_ifindex)
                return false;
 
-       info = PKTINFO_SKB_CB(skb);
        info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
-       info->ipi_ifindex = skb->dev->ifindex;
        return true;
 }
 
@@ -591,6 +590,7 @@ static bool setsockopt_needs_rtnl(int optname)
        case MCAST_LEAVE_GROUP:
        case MCAST_LEAVE_SOURCE_GROUP:
        case MCAST_UNBLOCK_SOURCE:
+       case IP_ROUTER_ALERT:
                return true;
        }
        return false;
index c0317c940bcdc303015f500b52198e0862440e17..b036e85e093b3e97cee1b0dbffc8d1dfeb6a2b72 100644 (file)
@@ -1278,7 +1278,7 @@ static void mrtsock_destruct(struct sock *sk)
        struct net *net = sock_net(sk);
        struct mr_table *mrt;
 
-       rtnl_lock();
+       ASSERT_RTNL();
        ipmr_for_each_table(mrt, net) {
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
@@ -1289,7 +1289,6 @@ static void mrtsock_destruct(struct sock *sk)
                        mroute_clean_tables(mrt, false);
                }
        }
-       rtnl_unlock();
 }
 
 /* Socket options and virtual interface manipulation. The whole
@@ -1353,13 +1352,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                if (sk != rcu_access_pointer(mrt->mroute_sk)) {
                        ret = -EACCES;
                } else {
-                       /* We need to unlock here because mrtsock_destruct takes
-                        * care of rtnl itself and we can't change that due to
-                        * the IP_ROUTER_ALERT setsockopt which runs without it.
-                        */
-                       rtnl_unlock();
                        ret = ip_ra_control(sk, 0, NULL);
-                       goto out;
+                       goto out_unlock;
                }
                break;
        case MRT_ADD_VIF:
@@ -1470,7 +1464,6 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
        }
 out_unlock:
        rtnl_unlock();
-out:
        return ret;
 }
 
index 8119e1f66e036ad2a8372bf24dd943c7d9631d8e..9d943974de2b6d91c56b2ae2dee0019883f8f3cf 100644 (file)
@@ -682,7 +682,9 @@ static void raw_close(struct sock *sk, long timeout)
        /*
         * Raw sockets may have direct kernel references. Kill them.
         */
+       rtnl_lock();
        ip_ra_control(sk, 0, NULL);
+       rtnl_unlock();
 
        sk_common_release(sk);
 }
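The hunks in ip_sockglue.c, ipmr.c and here form a single locking change; the resulting invariant, as a sketch:

/* ip_ra_control() now always runs under RTNL:
 *  - setsockopt: IP_ROUTER_ALERT is listed in setsockopt_needs_rtnl();
 *  - raw_close: takes rtnl_lock()/rtnl_unlock() explicitly (above);
 *  - mrtsock_destruct: asserts the lock instead of taking it. */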
index eec27f87efaca15133cf1d5225e37e6a2f6a6f8a..e011122ebd43c190aec3812099345ec852444284 100644 (file)
@@ -405,9 +405,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
  * At one point, excluding local errors was a quick test to identify icmp/icmp6
  * errors. This is no longer true, but the test remained, so the v6 stack,
  * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
- *
- * Timestamp code paths do not initialize the fields expected by cmsg:
- * the PKTINFO fields in skb->cb[]. Fill those in here.
  */
 static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
                                      struct sock_exterr_skb *serr)
@@ -419,14 +416,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
        if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
                return false;
 
-       if (!skb->dev)
+       if (!IP6CB(skb)->iif)
                return false;
 
-       if (skb->protocol == htons(ETH_P_IPV6))
-               IP6CB(skb)->iif = skb->dev->ifindex;
-       else
-               PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
-
        return true;
 }
 
index 275cac628a95066f0a27e93f5015ddeb0172c28c..25192a3b0cd7ec2a100fd4c5c20d52f8333984e8 100644 (file)
@@ -388,7 +388,6 @@ looped_back:
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((&hdr->segments_left) -
                                   skb_network_header(skb)));
-               kfree_skb(skb);
                return -1;
        }
 
index aacfb4bce1533b3f3b38e1173c18cb1bb6b33099..c45b12b4431cbfcaef1f8452bae871bb176be478 100644 (file)
@@ -122,11 +122,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
                        max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
        /*
         * RFC4291 2.5.3
+        * The loopback address must not be used as the source address in IPv6
+        * packets that are sent outside of a single node. [..]
         * A packet received on an interface with a destination address
         * of loopback must be dropped.
         */
-       if (!(dev->flags & IFF_LOOPBACK) &&
-           ipv6_addr_loopback(&hdr->daddr))
+       if ((ipv6_addr_loopback(&hdr->saddr) ||
+            ipv6_addr_loopback(&hdr->daddr)) &&
+            !(dev->flags & IFF_LOOPBACK))
                goto err;
 
        /* RFC4291 Errata ID: 3480
index 6ba6c900ebcf430cf313a2bef55ff69c114af218..bf34d0950752ba1466fa806fd4f8ce0b802b0087 100644 (file)
@@ -774,7 +774,8 @@ failure:
  *     Delete a VIF entry
  */
 
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+                      struct list_head *head)
 {
        struct mif_device *v;
        struct net_device *dev;
@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
                                             dev->ifindex, &in6_dev->cnf);
        }
 
-       if (v->flags & MIFF_REGISTER)
+       if ((v->flags & MIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);
 
        dev_put(dev);
@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
        struct mr6_table *mrt;
        struct mif_device *v;
        int ct;
-       LIST_HEAD(list);
 
        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
                v = &mrt->vif6_table[0];
                for (ct = 0; ct < mrt->maxvif; ct++, v++) {
                        if (v->dev == dev)
-                               mif6_delete(mrt, ct, &list);
+                               mif6_delete(mrt, ct, 1, NULL);
                }
        }
-       unregister_netdevice_many(&list);
 
        return NOTIFY_DONE;
 }
@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
        for (i = 0; i < mrt->maxvif; i++) {
                if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
                        continue;
-               mif6_delete(mrt, i, &list);
+               mif6_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);
 
@@ -1707,7 +1706,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
                if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
                        return -EFAULT;
                rtnl_lock();
-               ret = mif6_delete(mrt, mifi, NULL);
+               ret = mif6_delete(mrt, mifi, 0, NULL);
                rtnl_unlock();
                return ret;
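Call modes of the extended mif6_delete(), from the hunks above:

mif6_delete(mrt, ct, 1, NULL);	/* netdev notifier: the device is already
				 * being unregistered; do not queue a
				 * second unregister. */
mif6_delete(mrt, i, 0, &list);	/* normal teardown: queue the register
				 * VIF device for unregistration. */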
 
index 9db1418993f2b8a5b4194895f243441033d4729a..fb174b590fd3b443a7503207d822dd02e7171290 100644 (file)
@@ -1854,6 +1854,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
        int addr_type;
        int err = -EINVAL;
 
+       /* RTF_PCPU is an internal flag; cannot be set by userspace */
+       if (cfg->fc_flags & RTF_PCPU)
+               goto out;
+
        if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
                goto out;
 #ifndef CONFIG_IPV6_SUBTREES
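The guard can be restated outside the kernel. A sketch assuming the RTF_PCPU value this series exports through include/uapi/linux/ipv6_route.h:

#include <errno.h>
#include <stdint.h>

#define RTF_PCPU 0x40000000     /* kernel-internal route flag (assumed value) */

/* Sketch: refuse any userspace-supplied flag word carrying the internal
 * bit before it reaches route creation. */
static int validate_fc_flags(uint32_t fc_flags)
{
        return (fc_flags & RTF_PCPU) ? -EINVAL : 0;
}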
index a855eb325b030a666fe92c56a2d432c77d9dfe7a..5f44ffed25768d83c31b31295474c5ecf623e986 100644
@@ -53,6 +53,9 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
                struct sr6_tlv *tlv;
                unsigned int tlv_len;
 
+               if (trailing < sizeof(*tlv))
+                       return false;
+
                tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset);
                tlv_len = sizeof(*tlv) + tlv->len;
 
index c6252ed42c1de65dee149d7d869b62b96616e22a..be8cecc6500214de68cc8872b48b38c840d3304f 100644
@@ -63,8 +63,13 @@ struct pfkey_sock {
                } u;
                struct sk_buff  *skb;
        } dump;
+       struct mutex dump_lock;
 };
 
+static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+                              xfrm_address_t *saddr, xfrm_address_t *daddr,
+                              u16 *family);
+
 static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
 {
        return (struct pfkey_sock *)sk;
@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
 {
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
        struct sock *sk;
+       struct pfkey_sock *pfk;
        int err;
 
        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
        if (sk == NULL)
                goto out;
 
+       pfk = pfkey_sk(sk);
+       mutex_init(&pfk->dump_lock);
+
        sock->ops = &pfkey_ops;
        sock_init_data(sock, sk);
 
@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
        struct sadb_msg *hdr;
        int rc;
 
+       mutex_lock(&pfk->dump_lock);
+       if (!pfk->dump.dump) {
+               rc = 0;
+               goto out;
+       }
+
        rc = pfk->dump.dump(pfk);
-       if (rc == -ENOBUFS)
-               return 0;
+       if (rc == -ENOBUFS) {
+               rc = 0;
+               goto out;
+       }
 
        if (pfk->dump.skb) {
-               if (!pfkey_can_dump(&pfk->sk))
-                       return 0;
+               if (!pfkey_can_dump(&pfk->sk)) {
+                       rc = 0;
+                       goto out;
+               }
 
                hdr = (struct sadb_msg *) pfk->dump.skb->data;
                hdr->sadb_msg_seq = 0;
@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
        }
 
        pfkey_terminate_dump(pfk);
+
+out:
+       mutex_unlock(&pfk->dump_lock);
        return rc;
 }
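The race being closed is a check-then-act on pfk->dump: two requests could both observe dump.dump == NULL and start concurrent dumps. A minimal pthread sketch of the locking discipline (a userspace analogue, not kernel code):

#include <pthread.h>
#include <stddef.h>

struct dumper {
        pthread_mutex_t lock;
        int (*dump)(void *);
};

/* The -EBUSY test and the publication of the callback happen under one
 * lock, so a second request cannot slip in between them. */
static int start_dump(struct dumper *d, int (*fn)(void *))
{
        pthread_mutex_lock(&d->lock);
        if (d->dump) {
                pthread_mutex_unlock(&d->lock);
                return -1;              /* the kernel returns -EBUSY here */
        }
        d->dump = fn;
        pthread_mutex_unlock(&d->lock);
        return 0;
}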
 
@@ -1793,19 +1815,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
        struct xfrm_address_filter *filter = NULL;
        struct pfkey_sock *pfk = pfkey_sk(sk);
 
-       if (pfk->dump.dump != NULL)
+       mutex_lock(&pfk->dump_lock);
+       if (pfk->dump.dump != NULL) {
+               mutex_unlock(&pfk->dump_lock);
                return -EBUSY;
+       }
 
        proto = pfkey_satype2proto(hdr->sadb_msg_satype);
-       if (proto == 0)
+       if (proto == 0) {
+               mutex_unlock(&pfk->dump_lock);
                return -EINVAL;
+       }
 
        if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
                struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
 
                filter = kmalloc(sizeof(*filter), GFP_KERNEL);
-               if (filter == NULL)
+               if (filter == NULL) {
+                       mutex_unlock(&pfk->dump_lock);
                        return -ENOMEM;
+               }
 
                memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
                       sizeof(xfrm_address_t));
@@ -1821,6 +1850,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
        pfk->dump.dump = pfkey_dump_sa;
        pfk->dump.done = pfkey_dump_sa_done;
        xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
+       mutex_unlock(&pfk->dump_lock);
 
        return pfkey_do_dump(pfk);
 }
@@ -1913,19 +1943,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 
        /* addresses present only in tunnel mode */
        if (t->mode == XFRM_MODE_TUNNEL) {
-               u8 *sa = (u8 *) (rq + 1);
-               int family, socklen;
+               int err;
 
-               family = pfkey_sockaddr_extract((struct sockaddr *)sa,
-                                               &t->saddr);
-               if (!family)
-                       return -EINVAL;
-
-               socklen = pfkey_sockaddr_len(family);
-               if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
-                                          &t->id.daddr) != family)
-                       return -EINVAL;
-               t->encap_family = family;
+               err = parse_sockaddr_pair(
+                       (struct sockaddr *)(rq + 1),
+                       rq->sadb_x_ipsecrequest_len - sizeof(*rq),
+                       &t->saddr, &t->id.daddr, &t->encap_family);
+               if (err)
+                       return err;
        } else
                t->encap_family = xp->family;
 
@@ -1945,7 +1970,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
        if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
                return -EINVAL;
 
-       while (len >= sizeof(struct sadb_x_ipsecrequest)) {
+       while (len >= sizeof(*rq)) {
+               if (len < rq->sadb_x_ipsecrequest_len ||
+                   rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+                       return -EINVAL;
+
                if ((err = parse_ipsecrequest(xp, rq)) < 0)
                        return err;
                len -= rq->sadb_x_ipsecrequest_len;
@@ -2408,7 +2437,6 @@ out:
        return err;
 }
 
-#ifdef CONFIG_NET_KEY_MIGRATE
 static int pfkey_sockaddr_pair_size(sa_family_t family)
 {
        return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2420,7 +2448,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
 {
        int af, socklen;
 
-       if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
+       if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
                return -EINVAL;
 
        af = pfkey_sockaddr_extract(sa, saddr);
@@ -2436,6 +2464,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
        return 0;
 }
 
+#ifdef CONFIG_NET_KEY_MIGRATE
 static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
                                    struct xfrm_migrate *m)
 {
@@ -2443,13 +2472,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
        struct sadb_x_ipsecrequest *rq2;
        int mode;
 
-       if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-           len < rq1->sadb_x_ipsecrequest_len)
+       if (len < sizeof(*rq1) ||
+           len < rq1->sadb_x_ipsecrequest_len ||
+           rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
                return -EINVAL;
 
        /* old endpoints */
        err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
-                                 rq1->sadb_x_ipsecrequest_len,
+                                 rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
                                  &m->old_saddr, &m->old_daddr,
                                  &m->old_family);
        if (err)
@@ -2458,13 +2488,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
        rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
        len -= rq1->sadb_x_ipsecrequest_len;
 
-       if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-           len < rq2->sadb_x_ipsecrequest_len)
+       if (len <= sizeof(*rq2) ||
+           len < rq2->sadb_x_ipsecrequest_len ||
+           rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
                return -EINVAL;
 
        /* new endpoints */
        err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
-                                 rq2->sadb_x_ipsecrequest_len,
+                                 rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
                                  &m->new_saddr, &m->new_daddr,
                                  &m->new_family);
        if (err)
@@ -2679,14 +2710,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
 {
        struct pfkey_sock *pfk = pfkey_sk(sk);
 
-       if (pfk->dump.dump != NULL)
+       mutex_lock(&pfk->dump_lock);
+       if (pfk->dump.dump != NULL) {
+               mutex_unlock(&pfk->dump_lock);
                return -EBUSY;
+       }
 
        pfk->dump.msg_version = hdr->sadb_msg_version;
        pfk->dump.msg_portid = hdr->sadb_msg_pid;
        pfk->dump.dump = pfkey_dump_sp;
        pfk->dump.done = pfkey_dump_sp_done;
        xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
+       mutex_unlock(&pfk->dump_lock);
 
        return pfkey_do_dump(pfk);
 }
index e48724a6725e3266c1d5559d268339a7d2cd7f10..4d7543d1a62cce8d70e2d6894ffb86920c80c241 100644
@@ -208,6 +208,51 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
        return len;
 }
 
+static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
+                                        struct sk_buff *skb,
+                                        int rtap_vendor_space)
+{
+       struct {
+               struct ieee80211_hdr_3addr hdr;
+               u8 category;
+               u8 action_code;
+       } __packed action;
+
+       if (!sdata)
+               return;
+
+       BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
+
+       if (skb->len < rtap_vendor_space + sizeof(action) +
+                      VHT_MUMIMO_GROUPS_DATA_LEN)
+               return;
+
+       if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
+               return;
+
+       skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
+
+       if (!ieee80211_is_action(action.hdr.frame_control))
+               return;
+
+       if (action.category != WLAN_CATEGORY_VHT)
+               return;
+
+       if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
+               return;
+
+       if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
+               return;
+
+       skb = skb_copy(skb, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+       skb_queue_tail(&sdata->skb_queue, skb);
+       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
 /*
  * ieee80211_add_rx_radiotap_header - add radiotap header
  *
@@ -515,7 +560,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
        struct net_device *prev_dev = NULL;
        int present_fcs_len = 0;
        unsigned int rtap_vendor_space = 0;
-       struct ieee80211_mgmt *mgmt;
        struct ieee80211_sub_if_data *monitor_sdata =
                rcu_dereference(local->monitor_sdata);
 
@@ -553,6 +597,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                return remove_monitor_info(local, origskb, rtap_vendor_space);
        }
 
+       ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
+
        /* room for the radiotap header based on driver features */
        rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
        needed_headroom = rt_hdrlen - rtap_vendor_space;
@@ -618,23 +664,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                ieee80211_rx_stats(sdata->dev, skb->len);
        }
 
-       mgmt = (void *)skb->data;
-       if (monitor_sdata &&
-           skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
-           ieee80211_is_action(mgmt->frame_control) &&
-           mgmt->u.action.category == WLAN_CATEGORY_VHT &&
-           mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
-           is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
-           ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
-               struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
-
-               if (mu_skb) {
-                       mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
-                       skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
-                       ieee80211_queue_work(&local->hw, &monitor_sdata->work);
-               }
-       }
-
        if (prev_dev) {
                skb->dev = prev_dev;
                netif_receive_skb(skb);
@@ -3610,6 +3639,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                            !ether_addr_equal(bssid, hdr->addr1))
                                return false;
                }
+
+               /*
+                * 802.11-2016 Table 9-26 says that for data frames, A1 must be
+                * the BSSID - we've checked that already but may have accepted
+                * the wildcard (ff:ff:ff:ff:ff:ff).
+                *
+                * It also says:
+                *      The BSSID of the Data frame is determined as follows:
+                *      a) If the STA is contained within an AP or is associated
+                *         with an AP, the BSSID is the address currently in use
+                *         by the STA contained in the AP.
+                *
+                * So we should not accept data frames with an address that's
+                * multicast.
+                *
+                * Accepting it also opens a security problem because stations
+                * could encrypt it with the GTK and inject traffic that way.
+                */
+               if (ieee80211_is_data(hdr->frame_control) && multicast)
+                       return false;
+
                return true;
        case NL80211_IFTYPE_WDS:
                if (bssid || !ieee80211_is_data(hdr->frame_control))
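The new helper replaces direct casts into the frame with a length check followed by skb_copy_bits() into a stack struct, which is also correct for non-linear buffers. The same "check, copy, then inspect" shape in plain C (a sketch with hypothetical names):

#include <stdint.h>
#include <string.h>

struct __attribute__((packed)) action_hdr {
        uint8_t category;
        uint8_t action_code;
};

/* Never dereference into the buffer until the bounds check has passed;
 * the overflow-safe form compares the remainder, not off + size. */
static int peek_action(const uint8_t *buf, size_t len, size_t off,
                       struct action_hdr *out)
{
        if (off > len || len - off < sizeof(*out))
                return -1;
        memcpy(out, buf + off, sizeof(*out));
        return 0;
}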
index ae5ac175b2bef96ffa614bc799db5cd90a7bdc08..9da7368b0140f9e4a96794e21d66f487881987ba 100644
@@ -658,7 +658,9 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        }
 
        if (plen != len) {
-               skb_pad(skb, plen - len);
+               rc = skb_pad(skb, plen - len);
+               if (rc)
+                       goto out_node;
                skb_put(skb, plen - len);
        }
 
index b70aa57319ea3233395dc7ae349b8b7aab5dfd03..e05b924618a03e6a58655873700ee66e0688b7ad 100644
@@ -529,20 +529,20 @@ errout:
        return err;
 }
 
-static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb)
+static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
 {
-       a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL);
-       if (!a->act_cookie)
-               return -ENOMEM;
+       struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return NULL;
 
-       a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
-       if (!a->act_cookie->data) {
-               kfree(a->act_cookie);
-               return -ENOMEM;
+       c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
+       if (!c->data) {
+               kfree(c);
+               return NULL;
        }
-       a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]);
+       c->len = nla_len(tb[TCA_ACT_COOKIE]);
 
-       return 0;
+       return c;
 }
 
 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
@@ -551,6 +551,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 {
        struct tc_action *a;
        struct tc_action_ops *a_o;
+       struct tc_cookie *cookie = NULL;
        char act_name[IFNAMSIZ];
        struct nlattr *tb[TCA_ACT_MAX + 1];
        struct nlattr *kind;
@@ -566,6 +567,18 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
                        goto err_out;
                if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
                        goto err_out;
+               if (tb[TCA_ACT_COOKIE]) {
+                       int cklen = nla_len(tb[TCA_ACT_COOKIE]);
+
+                       if (cklen > TC_COOKIE_MAX_SIZE)
+                               goto err_out;
+
+                       cookie = nla_memdup_cookie(tb);
+                       if (!cookie) {
+                               err = -ENOMEM;
+                               goto err_out;
+                       }
+               }
        } else {
                err = -EINVAL;
                if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
@@ -604,20 +617,12 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
        if (err < 0)
                goto err_mod;
 
-       if (tb[TCA_ACT_COOKIE]) {
-               int cklen = nla_len(tb[TCA_ACT_COOKIE]);
-
-               if (cklen > TC_COOKIE_MAX_SIZE) {
-                       err = -EINVAL;
-                       tcf_hash_release(a, bind);
-                       goto err_mod;
-               }
-
-               if (nla_memdup_cookie(a, tb) < 0) {
-                       err = -ENOMEM;
-                       tcf_hash_release(a, bind);
-                       goto err_mod;
+       if (name == NULL && tb[TCA_ACT_COOKIE]) {
+               if (a->act_cookie) {
+                       kfree(a->act_cookie->data);
+                       kfree(a->act_cookie);
                }
+               a->act_cookie = cookie;
        }
 
        /* module count goes up only when brand new policy is created
@@ -632,6 +637,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
 err_mod:
        module_put(a_o->owner);
 err_out:
+       if (cookie) {
+               kfree(cookie->data);
+               kfree(cookie);
+       }
        return ERR_PTR(err);
 }
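The refactor settles on an allocate-early, attach-late ownership pattern: the cookie is duplicated before the action exists, attached only once initialization succeeds, and freed on the shared error path. The shape as a generic sketch (hypothetical types):

#include <stdlib.h>

struct cookie { void *data; };
struct action { struct cookie *cookie; };

static struct action *action_create(struct cookie *c)
{
        struct action *a = malloc(sizeof(*a));

        if (!a)
                goto err_out;           /* one exit frees the cookie */
        a->cookie = c;                  /* attach only after success */
        return a;

err_out:
        if (c) {
                free(c->data);
                free(c);
        }
        return NULL;
}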
 
index addf060399e09547307d9c023f36d8dbf869a931..9cb4fe4478a137e7bb3352ae958830657f6b5b7c 100644
@@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
  * immediately unlinked.
  */
 struct key_type key_type_dead = {
-       .name = "dead",
+       .name = ".dead",
 };
 
 /*
index 52c34532c78562643fce84832a5b536baf84988b..4ad3212adebe8becc152f22d2448f8db4716146b 100644
@@ -273,7 +273,8 @@ error:
  * Create and join an anonymous session keyring or join a named session
  * keyring, creating it if necessary.  A named session keyring must have Search
  * permission for it to be joined.  Session keyrings without this permit will
- * be skipped over.
+ * be skipped over.  It is not permitted for userspace to create or join
+ * keyrings whose names begin with a dot.
  *
  * If successful, the ID of the joined session keyring will be returned.
  */
@@ -290,12 +291,16 @@ long keyctl_join_session_keyring(const char __user *_name)
                        ret = PTR_ERR(name);
                        goto error;
                }
+
+               ret = -EPERM;
+               if (name[0] == '.')
+                       goto error_name;
        }
 
        /* join the session */
        ret = join_session_keyring(name);
+error_name:
        kfree(name);
-
 error:
        return ret;
 }
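With this change, dot-prefixed session keyring names are reserved for the kernel, matching the ".dead" rename above. A small userspace probe, assuming libkeyutils is installed:

#include <errno.h>
#include <stdio.h>
#include <keyutils.h>           /* build with -lkeyutils */

int main(void)
{
        /* Expected to fail with EPERM on a patched kernel. */
        if (keyctl_join_session_keyring(".test") == -1 && errno == EPERM)
                printf("dot-prefixed names rejected, as expected\n");
        return 0;
}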
@@ -1253,8 +1258,8 @@ error:
  * Read or set the default keyring in which request_key() will cache keys and
  * return the old setting.
  *
- * If a process keyring is specified then this will be created if it doesn't
- * yet exist.  The old setting will be returned if successful.
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist.  The old setting will be returned if successful.
  */
 long keyctl_set_reqkey_keyring(int reqkey_defl)
 {
@@ -1279,11 +1284,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
 
        case KEY_REQKEY_DEFL_PROCESS_KEYRING:
                ret = install_process_keyring_to_cred(new);
-               if (ret < 0) {
-                       if (ret != -EEXIST)
-                               goto error;
-                       ret = 0;
-               }
+               if (ret < 0)
+                       goto error;
                goto set;
 
        case KEY_REQKEY_DEFL_DEFAULT:
index b6fdd22205b169b663cdb00aecd5d214c7a376dd..9139b18fc863eb36d62d7777f1553dda63236dfe 100644
@@ -128,13 +128,18 @@ error:
 }
 
 /*
- * Install a fresh thread keyring directly to new credentials.  This keyring is
- * allowed to overrun the quota.
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
  */
 int install_thread_keyring_to_cred(struct cred *new)
 {
        struct key *keyring;
 
+       if (new->thread_keyring)
+               return 0;
+
        keyring = keyring_alloc("_tid", new->uid, new->gid, new,
                                KEY_POS_ALL | KEY_USR_VIEW,
                                KEY_ALLOC_QUOTA_OVERRUN,
@@ -147,7 +152,9 @@ int install_thread_keyring_to_cred(struct cred *new)
 }
 
 /*
- * Install a fresh thread keyring, discarding the old one.
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
  */
 static int install_thread_keyring(void)
 {
@@ -158,8 +165,6 @@ static int install_thread_keyring(void)
        if (!new)
                return -ENOMEM;
 
-       BUG_ON(new->thread_keyring);
-
        ret = install_thread_keyring_to_cred(new);
        if (ret < 0) {
                abort_creds(new);
@@ -170,17 +175,17 @@ static int install_thread_keyring(void)
 }
 
 /*
- * Install a process keyring directly to a credentials struct.
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already.  This is allowed to overrun the quota.
  *
- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other value on any other error
+ * Return: 0 if a process keyring is now present; -errno on failure.
  */
 int install_process_keyring_to_cred(struct cred *new)
 {
        struct key *keyring;
 
        if (new->process_keyring)
-               return -EEXIST;
+               return 0;
 
        keyring = keyring_alloc("_pid", new->uid, new->gid, new,
                                KEY_POS_ALL | KEY_USR_VIEW,
@@ -194,11 +199,9 @@ int install_process_keyring_to_cred(struct cred *new)
 }
 
 /*
- * Make sure a process keyring is installed for the current process.  The
- * existing process keyring is not replaced.
+ * Install a process keyring to the current task if it didn't have one already.
  *
- * Returns 0 if there is a process keyring by the end of this function, some
- * error otherwise.
+ * Return: 0 if a process keyring is now present; -errno on failure.
  */
 static int install_process_keyring(void)
 {
@@ -212,14 +215,18 @@ static int install_process_keyring(void)
        ret = install_process_keyring_to_cred(new);
        if (ret < 0) {
                abort_creds(new);
-               return ret != -EEXIST ? ret : 0;
+               return ret;
        }
 
        return commit_creds(new);
 }
 
 /*
- * Install a session keyring directly to a credentials struct.
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any.  If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
  */
 int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 {
@@ -254,8 +261,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 }
 
 /*
- * Install a session keyring, discarding the old one.  If a keyring is not
- * supplied, an empty one is invented.
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any.  If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
  */
 static int install_session_keyring(struct key *keyring)
 {
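The reworked helpers all converge on an "ensure present" contract: a repeat call is a no-op success rather than -EEXIST, so callers stop special-casing that error. The shape as a self-contained sketch (hypothetical types):

#include <errno.h>
#include <stdlib.h>

struct bag { void *resource; };

static int ensure_resource(struct bag *b)
{
        if (b->resource)
                return 0;               /* already installed: success */
        b->resource = malloc(64);
        return b->resource ? 0 : -ENOMEM;
}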
index a0aa2009b0e0a81e65672eb795039a2172484c55..20f1871874df54a7d567797c753e379a760e492c 100644
@@ -282,7 +282,7 @@ static void test_arraymap_percpu(int task, void *data)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        int key, next_key, fd, i;
-       long values[nr_cpus];
+       long long values[nr_cpus];
 
        fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
                            sizeof(values[0]), 2, 0);
@@ -340,7 +340,7 @@ static void test_arraymap_percpu_many_keys(void)
         * allocator more than anything else
         */
        unsigned int nr_keys = 2000;
-       long values[nr_cpus];
+       long long values[nr_cpus];
        int key, fd, i;
 
        fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
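Per-CPU map lookups fill one 64-bit slot per possible CPU, so the buffer element must be 8 bytes on every ABI; plain long is only 32 bits on ILP32 targets. A quick standalone check:

#include <stdio.h>

int main(void)
{
        /* LP64: both print 8; ILP32 (32-bit x86/ARM): long prints 4. */
        printf("sizeof(long)=%zu sizeof(long long)=%zu\n",
               sizeof(long), sizeof(long long));
        return 0;
}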
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
new file mode 100644
index 0000000..bab5ff7
--- /dev/null
@@ -0,0 +1,117 @@
+#!/bin/sh
+# description: ftrace - function pid filters
+
+# Make sure that function pid matching filter works.
+# Also test it on an instance directory
+
+if ! grep -q function available_tracers; then
+    echo "no function tracer configured"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_pid ]; then
+    echo "set_ftrace_pid not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+if [ ! -f set_ftrace_filter ]; then
+    echo "set_ftrace_filter not found? Is function tracer not set?"
+    exit_unsupported
+fi
+
+do_function_fork=1
+
+if [ ! -f options/function-fork ]; then
+    do_function_fork=0
+    echo "no option for function-fork found. Option will not be tested."
+fi
+
+read PID _ < /proc/self/stat
+
+if [ $do_function_fork -eq 1 ]; then
+    # default value of function-fork option
+    orig_value=`grep function-fork trace_options`
+fi
+
+do_reset() {
+    reset_tracer
+    clear_trace
+    enable_tracing
+    echo > set_ftrace_filter
+    echo > set_ftrace_pid
+
+    if [ $do_function_fork -eq 0 ]; then
+       return
+    fi
+
+    echo $orig_value > trace_options
+}
+
+fail() { # msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+yield() {
+    ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
+}
+
+do_test() {
+    disable_tracing
+
+    echo do_execve* > set_ftrace_filter
+    echo *do_fork >> set_ftrace_filter
+
+    echo $PID > set_ftrace_pid
+    echo function > current_tracer
+
+    if [ $do_function_fork -eq 1 ]; then
+       # don't allow children to be traced
+       echo nofunction-fork > trace_options
+    fi
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should be 0
+    if [ $count_pid -eq 0 -o $count_other -ne 0 ]; then
+       fail "PID filtering not working?"
+    fi
+
+    disable_tracing
+    clear_trace
+
+    if [ $do_function_fork -eq 0 ]; then
+       return
+    fi
+
+    # allow children to be traced
+    echo function-fork > trace_options
+
+    enable_tracing
+    yield
+
+    count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+    count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+    # count_other should NOT be 0
+    if [ $count_pid -eq 0 -o $count_other -eq 0 ]; then
+       fail "PID filtering not following fork?"
+    fi
+}
+
+do_test
+
+mkdir instances/foo
+cd instances/foo
+do_test
+cd ../../
+rmdir instances/foo
+
+do_reset
+
+exit 0
index 4124593696862fcb370fee15bcf7e34e11cdf9e0..e62bb354820cacdc653a19db385e3bd497931d0b 100644
@@ -75,7 +75,7 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
 {
        int fd, val;
 
-       fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
+       fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
        if (fd < 0) {
                perror("socket packet");
                exit(1);
@@ -95,6 +95,24 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
        return fd;
 }
 
+static void sock_fanout_set_cbpf(int fd)
+{
+       struct sock_filter bpf_filter[] = {
+               BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 80),           /* ldb [80] */
+               BPF_STMT(BPF_RET+BPF_A, 0),                   /* ret A */
+       };
+       struct sock_fprog bpf_prog;
+
+       bpf_prog.filter = bpf_filter;
+       bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
+
+       if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog,
+                      sizeof(bpf_prog))) {
+               perror("fanout data cbpf");
+               exit(1);
+       }
+}
+
 static void sock_fanout_set_ebpf(int fd)
 {
        const int len_off = __builtin_offsetof(struct __sk_buff, len);
@@ -270,7 +288,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
                exit(1);
        }
        if (type == PACKET_FANOUT_CBPF)
-               sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+               sock_fanout_set_cbpf(fds[0]);
        else if (type == PACKET_FANOUT_EBPF)
                sock_fanout_set_ebpf(fds[0]);
 
index a77da88bf9469d515713e1d1f0f6d318544ac458..7d990d6c861b5863f23fe1d29523707c48ac28d5 100644
@@ -38,7 +38,7 @@
 # define __maybe_unused                __attribute__ ((__unused__))
 #endif
 
-static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
+static __maybe_unused void pair_udp_setfilter(int fd)
 {
        /* the filter below checks for all of the following conditions that
         * are based on the contents of create_payload()
@@ -76,23 +76,16 @@ static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
        };
        struct sock_fprog bpf_prog;
 
-       if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
-               bpf_filter[5].code = 0x16;   /* RET A                         */
-
        bpf_prog.filter = bpf_filter;
        bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
-       if (setsockopt(fd, lvl, optnum, &bpf_prog,
+
+       if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
                       sizeof(bpf_prog))) {
                perror("setsockopt SO_ATTACH_FILTER");
                exit(1);
        }
 }
 
-static __maybe_unused void pair_udp_setfilter(int fd)
-{
-       sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
-}
-
 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 {
        struct sockaddr_in saddr, daddr;