git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 3 Aug 2014 16:58:20 +0000 (09:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 3 Aug 2014 16:58:20 +0000 (09:58 -0700)
Pull timer fixes from Thomas Gleixner:
 "Two fixes in the timer area:
   - a long-standing lock inversion due to a printk
   - suspend-related hrtimer corruption in sched_clock"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  timer: Fix lock inversion between hrtimer_bases.lock and scheduler locks
  sched_clock: Avoid corrupting hrtimer tree during suspend

141 files changed:
.mailmap
CREDITS
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/dts/hi3620.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/ste-nomadik-s8815.dts
arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
arch/arm/crypto/aesbs-glue.c
arch/arm/include/asm/mach/arch.h
arch/arm/kernel/devtree.c
arch/arm/kernel/iwmmxt.S
arch/arm/kernel/kgdb.c
arch/arm/mach-exynos/exynos.c
arch/arm/mach-omap2/gpmc-nand.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/idmap.c
arch/arm/mm/mmu.c
arch/arm/xen/grant-table.c
arch/arm64/crypto/aes-glue.c
arch/blackfin/configs/BF609-EZKIT_defconfig
arch/blackfin/kernel/vmlinux.lds.S
arch/blackfin/mach-bf533/boards/blackstamp.c
arch/blackfin/mach-bf537/boards/cm_bf537e.c
arch/blackfin/mach-bf537/boards/cm_bf537u.c
arch/blackfin/mach-bf537/boards/tcm_bf537.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf561/boards/acvilon.c
arch/blackfin/mach-bf561/boards/cm_bf561.c
arch/blackfin/mach-bf561/boards/ezkit.c
arch/blackfin/mach-bf609/boards/ezkit.c
arch/blackfin/mach-bf609/include/mach/pm.h
arch/blackfin/mach-bf609/pm.c
arch/blackfin/mach-common/ints-priority.c
arch/parisc/include/uapi/asm/signal.h
arch/parisc/mm/init.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/platforms/powernv/opal-elog.c
arch/x86/include/asm/irqflags.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/xen/grant-table.c
crypto/af_alg.c
drivers/clk/ti/clk-7xx.c
drivers/firewire/ohci.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/hwmon/smsc47m192.c
drivers/iio/accel/bma180.c
drivers/iio/industrialio-buffer.c
drivers/isdn/gigaset/bas-gigaset.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-target.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/hyperv/netvsc.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/phy_device.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/r8152.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/of/fdt.c
drivers/parport/Kconfig
drivers/pnp/pnpacpi/core.c
drivers/rapidio/devices/tsi721_dma.c
drivers/scsi/scsi_lib.c
drivers/staging/rtl8723au/os_dep/usb_intf.c
drivers/staging/vt6655/bssdb.c
drivers/staging/vt6655/device_main.c
drivers/xen/grant-table.c
fs/afs/main.c
fs/direct-io.c
fs/fuse/inode.c
fs/namei.c
fs/open.c
include/dt-bindings/pinctrl/dra.h
include/linux/hugetlb.h
include/linux/of_fdt.h
include/net/ip.h
include/uapi/linux/fuse.h
include/xen/grant_table.h
kernel/events/core.c
kernel/kexec.c
kernel/kprobes.c
kernel/rcu/rcutorture.c
mm/filemap.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/migrate.c
mm/page-writeback.c
mm/page_alloc.c
net/compat.c
net/core/iovec.c
net/core/neighbour.c
net/ipv4/route.c
net/ipv6/ip6_output.c
net/mac80211/cfg.c
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_conn.c
net/sctp/associola.c
net/wireless/trace.h
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
sound/firewire/bebob/bebob_maudio.c
virt/kvm/arm/vgic.c

index df1baba43a64c7e3bab28b01ca66cbbe3e831f50..1ad68731fb47a9658733520c1982038cf6aaaa00 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,11 @@ Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
 John Stultz <johnstul@us.ibm.com>
+<josh@joshtriplett.org> <josh@freedesktop.org>
+<josh@joshtriplett.org> <josh@kernel.org>
+<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
+<josh@joshtriplett.org> <josht@us.ibm.com>
+<josh@joshtriplett.org> <josht@vnet.ibm.com>
 Juha Yrjola <at solidboot.com>
 Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
diff --git a/CREDITS b/CREDITS
index 28ee1514b9deec0d19bca725b74ca6934d719f24..a80b66718f66550abee52ea2e13e1058917325c4 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3511,10 +3511,11 @@ S: MacGregor A.C.T 2615
 S: Australia
 
 N: Josh Triplett
-E: josh@freedesktop.org
-P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87  CA26 189B 9946 D0FE 7AFB
-D: rcutorture maintainer
+E: josh@joshtriplett.org
+P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C  1E67 0ED9 A3DF 8AFF 873D
+D: RCU and rcutorture
 D: lock annotations, finding and fixing lock bugs
+D: kernel tinification
 
 N: Winfried Trümper
 E: winni@xpilot.org
index 86efa7e213c257e59f4285d30bad7555b259c566..c2066f4c3286195b48aa57e23ff929a1f09aab0e 100644 (file)
@@ -7424,7 +7424,7 @@ S:        Orphan
 F:     drivers/net/wireless/ray*
 
 RCUTORTURE MODULE
-M:     Josh Triplett <josh@freedesktop.org>
+M:     Josh Triplett <josh@joshtriplett.org>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
@@ -8922,7 +8922,7 @@ M:        Stephen Warren <swarren@wwwdotorg.org>
 M:     Thierry Reding <thierry.reding@gmail.com>
 L:     linux-tegra@vger.kernel.org
 Q:     http://patchwork.ozlabs.org/project/linux-tegra/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git
 S:     Supported
 N:     [^a-z]tegra
 
index 6b2774145d664289cca4b1a1a3799543d5f916c2..f6a7794e4db436c16246389642f86de3c9098699 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
@@ -688,6 +688,8 @@ KBUILD_CFLAGS       += -fomit-frame-pointer
 endif
 endif
 
+KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
+
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS  += -g
 KBUILD_AFLAGS  += -Wa,-gdwarf-2
index 88acf8bc1490a6cc150029f82e8965db80970257..290f02ee0157492ec037494c8db8bd5d036360dd 100644 (file)
@@ -313,7 +313,7 @@ config ARCH_MULTIPLATFORM
 config ARCH_INTEGRATOR
        bool "ARM Ltd. Integrator family"
        select ARM_AMBA
-       select ARM_PATCH_PHYS_VIRT
+       select ARM_PATCH_PHYS_VIRT if MMU
        select AUTO_ZRELADDR
        select COMMON_CLK
        select COMMON_CLK_VERSATILE
@@ -659,7 +659,7 @@ config ARCH_MSM
 config ARCH_SHMOBILE_LEGACY
        bool "Renesas ARM SoCs (non-multiplatform)"
        select ARCH_SHMOBILE
-       select ARM_PATCH_PHYS_VIRT
+       select ARM_PATCH_PHYS_VIRT if MMU
        select CLKDEV_LOOKUP
        select GENERIC_CLOCKEVENTS
        select HAVE_ARM_SCU if SMP
index ab1116d086be82bc405f599cd3f2677a749fae42..83a5b8685bd961818bfa8c9d6bea6bc3bff4ff24 100644 (file)
@@ -73,7 +73,7 @@
 
                L2: l2-cache {
                        compatible = "arm,pl310-cache";
-                       reg = <0xfc10000 0x100000>;
+                       reg = <0x100000 0x100000>;
                        interrupts = <0 15 4>;
                        cache-unified;
                        cache-level = <2>;
index 1fe45d1f75ec8d52aa8dd59a1e344a931fc32576..b15f1a77d684eac5bad3ea9f0defbf8d549a686a 100644 (file)
        };
 
        twl_power: power {
-               compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
+               compatible = "ti,twl4030-power-n900";
                ti,use_poweroff;
        };
 };
index 8d7ffaeff6e03fd3f4b3e56aebd313d4bd743d3d..79f68acfd5d46c9dd9bced05190dbb9173c5500b 100644 (file)
                        #clock-cells = <0>;
                        clock-output-names = "sd1";
                };
-               sd2_clk: sd3_clk@e615007c {
+               sd2_clk: sd3_clk@e615026c {
                        compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
-                       reg = <0 0xe615007c 0 4>;
+                       reg = <0 0xe615026c 0 4>;
                        clocks = <&pll1_div2_clk>;
                        #clock-cells = <0>;
                        clock-output-names = "sd2";
index f557feb997f46ff449df70859c0a3e3cf367c226..90d8b6c7a20506ee81e9e5258e0d4522d8cfeba8 100644 (file)
@@ -4,7 +4,7 @@
  */
 
 /dts-v1/;
-/include/ "ste-nomadik-stn8815.dtsi"
+#include "ste-nomadik-stn8815.dtsi"
 
 / {
        model = "Calao Systems USB-S8815";
index d316c955bd5f19a4c6683daf767c1aaf1376d106..dbcf521b017f207fff1286d3c02e3e6586cc761e 100644 (file)
@@ -1,7 +1,9 @@
 /*
  * Device Tree for the ST-Ericsson Nomadik 8815 STn8815 SoC
  */
-/include/ "skeleton.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include "skeleton.dtsi"
 
 / {
        #address-cells = <1>;
                        bus-width = <4>;
                        cap-mmc-highspeed;
                        cap-sd-highspeed;
-                       cd-gpios = <&gpio3 15 0x1>;
-                       cd-inverted;
+                       cd-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
                        vmmc-supply = <&vmmc_regulator>;
index 4522366da7595f2b1f2739beae84468955669983..15468fbbdea393ed020510abbb30145b52bce396 100644 (file)
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
                                dst += AES_BLOCK_SIZE;
                        } while (--blocks);
                }
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
                bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->dec, walk.iv);
                kernel_neon_end();
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        while (walk.nbytes) {
                u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
                        dst += AES_BLOCK_SIZE;
                        src += AES_BLOCK_SIZE;
                } while (--blocks);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
                bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->enc, walk.iv);
                kernel_neon_end();
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
                bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->dec, walk.iv);
                kernel_neon_end();
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
index 060a75e99263dd3613fb40eb1bf43998cae433d1..0406cb3f1af765685da88c54433b80737bc70152 100644 (file)
@@ -50,6 +50,7 @@ struct machine_desc {
        struct smp_operations   *smp;           /* SMP operations       */
        bool                    (*smp_init)(void);
        void                    (*fixup)(struct tag *, char **);
+       void                    (*dt_fixup)(void);
        void                    (*init_meminfo)(void);
        void                    (*reserve)(void);/* reserve mem blocks  */
        void                    (*map_io)(void);/* IO mapping function  */
index e94a157ddff11dca08e81e7182652c24d4d56a49..11c54de9f8cfa1e6227ea38dcfddddf2f67ee403 100644 (file)
@@ -212,7 +212,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
        mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
-       if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
+       if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
                return NULL;
 
        mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -237,6 +237,12 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
                dump_machine_table(); /* does not return */
        }
 
+       /* We really don't want to do this, but sometimes firmware provides buggy data */
+       if (mdesc->dt_fixup)
+               mdesc->dt_fixup();
+
+       early_init_dt_scan_nodes();
+
        /* Change machine number to match the mdesc we're using */
        __machine_arch_type = mdesc->nr;
 
index a5599cfc43cbb17280e26bc942c30853ab954c7f..2b32978ae905a1bc9b22c6b904be4e97f5277866 100644 (file)
@@ -94,13 +94,19 @@ ENTRY(iwmmxt_task_enable)
 
        mrc     p15, 0, r2, c2, c0, 0
        mov     r2, r2                          @ cpwait
+       bl      concan_save
 
-       teq     r1, #0                          @ test for last ownership
-       mov     lr, r9                          @ normal exit from exception
-       beq     concan_load                     @ no owner, skip save
+#ifdef CONFIG_PREEMPT_COUNT
+       get_thread_info r10
+#endif
+4:     dec_preempt_count r10, r3
+       mov     pc, r9                          @ normal exit from exception
 
 concan_save:
 
+       teq     r1, #0                          @ test for last ownership
+       beq     concan_load                     @ no owner, skip save
+
        tmrc    r2, wCon
 
        @ CUP? wCx
@@ -138,7 +144,7 @@ concan_dump:
        wstrd   wR15, [r1, #MMX_WR15]
 
 2:     teq     r0, #0                          @ anything to load?
-       beq     3f
+       moveq   pc, lr                          @ if not, return
 
 concan_load:
 
@@ -171,14 +177,9 @@ concan_load:
        @ clear CUP/MUP (only if r1 != 0)
        teq     r1, #0
        mov     r2, #0
-       beq     3f
-       tmcr    wCon, r2
+       moveq   pc, lr
 
-3:
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-#endif
-4:     dec_preempt_count r10, r3
+       tmcr    wCon, r2
        mov     pc, lr
 
 /*
index 778c2f7024ff57304227ce67665e749f39b05fc7..a74b53c1b7dfc3566e51957b6422da290121ff93 100644 (file)
@@ -160,12 +160,16 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
 static struct undef_hook kgdb_brkpt_hook = {
        .instr_mask             = 0xffffffff,
        .instr_val              = KGDB_BREAKINST,
+       .cpsr_mask              = MODE_MASK,
+       .cpsr_val               = SVC_MODE,
        .fn                     = kgdb_brk_fn
 };
 
 static struct undef_hook kgdb_compiled_brkpt_hook = {
        .instr_mask             = 0xffffffff,
        .instr_val              = KGDB_COMPILED_BREAK,
+       .cpsr_mask              = MODE_MASK,
+       .cpsr_val               = SVC_MODE,
        .fn                     = kgdb_compiled_brk_fn
 };
 
index 46d893fcbe8538ecccea346715487a66d3cc1e01..66c9b9614f3cce2d9bcd5f75ff344a67f81caa8e 100644 (file)
@@ -335,6 +335,15 @@ static void __init exynos_reserve(void)
 #endif
 }
 
+static void __init exynos_dt_fixup(void)
+{
+       /*
+        * Some versions of uboot pass garbage entries in the memory node,
+        * use the old CONFIG_ARM_NR_BANKS
+        */
+       of_fdt_limit_memory(8);
+}
+
 DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
        /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
        /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
@@ -348,4 +357,5 @@ DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
        .dt_compat      = exynos_dt_compat,
        .restart        = exynos_restart,
        .reserve        = exynos_reserve,
+       .dt_fixup       = exynos_dt_fixup,
 MACHINE_END
index 17cd39360afe853562b61a5dfb755f33ba5d7886..93914d2200691ca3e27a6b74d04ce3b52eb02703 100644 (file)
@@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
                 soc_is_omap54xx() || soc_is_dra7xx())
                return 1;
 
+       if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
+                ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
+               if (cpu_is_omap24xx())
+                       return 0;
+               else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
+                       return 0;
+               else
+                       return 1;
+       }
+
        /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
         * which require H/W based ECC error detection */
        if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
                 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
                return 0;
 
-       /*
-        * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
-        * and AM33xx derivates. Other chips may be added if confirmed to work.
-        */
-       if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
-           (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
-               return 0;
-
        /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
        if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
                return 1;
index 539e8106eb962811c7a159ee36257f84ea40d9c7..a0fe747634c1ff56d280159dafc78b9f2cd687ce 100644 (file)
@@ -168,6 +168,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
                smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
                break;
 
+       case L310_POWER_CTRL:
+               pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
+               return;
+
        default:
                WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
                return;
index 4c88935654caf5f28d4b05d82be57234822cd41b..1f88db06b133c1f253cfc6ced41483cf672ce4a0 100644 (file)
@@ -461,12 +461,21 @@ void __init dma_contiguous_remap(void)
                map.type = MT_MEMORY_DMA_READY;
 
                /*
-                * Clear previous low-memory mapping
+                * Clear previous low-memory mapping to ensure that the
+                * TLB does not see any conflicting entries, then flush
+                * the TLB of the old entries before creating new mappings.
+                *
+                * This ensures that any speculatively loaded TLB entries
+                * (even though they may be rare) can not cause any problems,
+                * and ensures that this code is architecturally compliant.
                 */
                for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                     addr += PMD_SIZE)
                        pmd_clear(pmd_off_k(addr));
 
+               flush_tlb_kernel_range(__phys_to_virt(start),
+                                      __phys_to_virt(end));
+
                iotable_init(&map, 1);
        }
 }
index 8e0e52eb76b57d7f9d4208fafbcdf024be369c75..c447ec70e8687ac1c4c02d00bd5597b66ce8f8dd 100644 (file)
@@ -9,6 +9,11 @@
 #include <asm/sections.h>
 #include <asm/system_info.h>
 
+/*
+ * Note: accesses outside of the kernel image and the identity map area
+ * are not supported on any CPU using the idmap tables as its current
+ * page tables.
+ */
 pgd_t *idmap_pgd;
 phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 
@@ -25,6 +30,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                        pr_warning("Failed to allocate identity pmd.\n");
                        return;
                }
+               /*
+                * Copy the original PMD to ensure that the PMD entries for
+                * the kernel image are preserved.
+                */
+               if (!pud_none(*pud))
+                       memcpy(pmd, pmd_offset(pud, 0),
+                              PTRS_PER_PMD * sizeof(pmd_t));
                pud_populate(&init_mm, pud, pmd);
                pmd += pmd_index(addr);
        } else
index ab14b79b03f0d4b573f89d34f38b005d31d4cff5..6e3ba8d112a2e534d2ca42ed2f4e8dc388f3f452 100644 (file)
@@ -1406,8 +1406,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
                return;
 
        /* remap kernel code and data */
-       map_start = init_mm.start_code;
-       map_end   = init_mm.brk;
+       map_start = init_mm.start_code & PMD_MASK;
+       map_end   = ALIGN(init_mm.brk, PMD_SIZE);
 
        /* get a handle on things... */
        pgd0 = pgd_offset_k(0);
@@ -1442,7 +1442,7 @@ void __init early_paging_init(const struct machine_desc *mdesc,
        }
 
        /* remap pmds for kernel mapping */
-       phys = __pa(map_start) & PMD_MASK;
+       phys = __pa(map_start);
        do {
                *pmdk++ = __pmd(phys | pmdprot);
                phys += PMD_SIZE;
index 859a9bb002d54875e5089a940442d5015a9fe1a4..91cf08ba1e957d251fcb5b0028705a4cd52a7fd8 100644 (file)
@@ -51,3 +51,8 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
 {
        return -ENOSYS;
 }
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+       return 0;
+}
index 60f2f4c122561fc1f0a85b83305e706c163db4d5..79cd911ef88c3e95806db16f55bffc904ff961e2 100644 (file)
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
                aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
                aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_dec, rounds, blocks, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
                                first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
                                first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_enc, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
 
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_dec, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
 
index a7e9bfd84183d5b7c26924679f9bb33d88ac0118..fcec5ce71392d20712b15516c3786e43513b0d10 100644 (file)
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index ba35864b2b74b50e3ad32527419a83ef6abb9629..c9eec84aa258628d1c9d70fc545420dde30c8bc1 100644 (file)
@@ -145,7 +145,7 @@ SECTIONS
 
        .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 #else
-       .init.data : AT(__data_lma + __data_len)
+       .init.data : AT(__data_lma + __data_len + 32)
        {
                __sinitdata = .;
                INIT_DATA
index 63b0e4fe760cd558d20a2f8359c1c975180b9c11..0ccf0cf4daaf92f8d38aa18962defd8a10707e7d 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index c65c6dbda3da9af86231d299b4502f0d3d72edb9..1e7290ef35258da56c5521ed23a1b637286b7342 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index af58454b4bff3fdc4bffda53d5d252726fd4e4f8..c7495dc74690db99f52bfa30c94c62dd5faae63c 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index a0211225748d23f0d0f14cdba4fd99a34aa8208f..6b988ad653d8d1e69a1721ddfe4b7208bf7cebb9 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 90138e6112c14b2f186640f22e61368a517fab94..1fe7ff286619f693c113faeac592ce3167d8de72 100644 (file)
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1",  "pinctrl-adi2.0", NULL, "can1"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0",  "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x",  "pinctrl-adi2.0", NULL, "atapi_alter"),
 #endif
        PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0",  "pinctrl-adi2.0", NULL, "nfc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", NULL, "keys_4x4"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "4bit",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "8bit",  "pinctrl-adi2.0", "keys_8x8grp", "keys"),
 };
 
 static int __init ezkit_init(void)
index 430b16d5ccb1124f6fd896d4c8651050695d9053..6ab951534d790b6060aeea63cda8c1b158d90470 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/jiffies.h>
 #include <linux/i2c-pca-platform.h>
 #include <linux/delay.h>
index 9f777df4cacce9e77ada5ec46ff1698a72fbe419..e862f7823e68db1b5ea374a295982c23db2800c8 100644 (file)
@@ -18,6 +18,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 88dee43e7abe8c4e34da9016b2333c539ef6b9a2..2de71e8c104b1e2233c7eba47c42ad28d2ab60c9 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/delay.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index 1ba4600de69f72b37ca503f7aab752b9f4dcdb97..e2c0b024ce88f2593f4551b711b3821d8ae3879e 100644 (file)
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 {
 #define CONFIG_SMC_GCTL_VAL     0x00000010
 
-       if (!devm_pinctrl_get_select_default(&pdev->dev))
-               return -EBUSY;
        bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
        bfin_write32(SMC_B0CTL, 0x01002011);
        bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 
 void bf609_nor_flash_exit(struct platform_device *pdev)
 {
-       devm_pinctrl_put(pdev->dev.pins->p);
        bfin_write32(SMC_GCTL, 0);
 }
 
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0",  "pinctrl-adi2.0", NULL, "smc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", NULL, "ppi2_16b"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "8bit",  "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "16bit",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit",  "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1",  "pinctrl-adi2.0", NULL, "sport1"),
index 3ca0fb965636ed3fad126ae021d9d87a6518ea8a..a1efd936dd30d2ec18e08c9d5b76bae6afabe99b 100644 (file)
@@ -10,6 +10,7 @@
 #define __MACH_BF609_PM_H__
 
 #include <linux/suspend.h>
+#include <linux/platform_device.h>
 
 extern int bfin609_pm_enter(suspend_state_t state);
 extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
 void bfin_sec_raise_irq(unsigned int sid);
 void coreb_enable(void);
 
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
 #endif
index 0cdd6955c7be5a80a2cb7d406b3fefb1eac9a9a9..b1bfcf434d16cfc25e999222c5c972f3f110d519 100644 (file)
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
 #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
 static int smc_pm_syscore_suspend(void)
 {
-       bf609_nor_flash_exit();
+       bf609_nor_flash_exit(NULL);
        return 0;
 }
 
 static void smc_pm_syscore_resume(void)
 {
-       bf609_nor_flash_init();
+       bf609_nor_flash_init(NULL);
 }
 
 static struct syscore_ops smc_pm_syscore_ops = {
index 867b7cef204cb91f0ed15e9386ba7ea0b2089d96..1f94784eab6d79b7f63673e20e2039fb41a07edb 100644 (file)
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
 
        bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
 
-       bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
        /* Enable interrupts IVG7-15 */
        bfin_irq_flags |= IMASK_IVG15 |
            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
index a2fa297196bc19f1de4f021ebec6e5338b237313..f5645d6a89f2c9c79e8e22cd9a7201df6dc389df 100644 (file)
@@ -69,8 +69,6 @@
 #define SA_NOMASK      SA_NODEFER
 #define SA_ONESHOT     SA_RESETHAND
 
-#define SA_RESTORER    0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ    2048
 #define SIGSTKSZ       8192
 
index ae085ad0fba03827df21874edfb6a6985c3338f8..0bef864264c0bb1e0d445c184ef79e51c9153caf 100644 (file)
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
 #endif
 
        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-       memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
index 658e89d2025b0b2dd65bb812d2c89d65867c1015..db2b482af6585184b425f4913440d675de6b89a4 100644 (file)
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
        for (f = flist; f; f = next) {
                /* Translate data addrs to absolute */
                for (i = 0; i < f->num_blocks; i++) {
-                       f->blocks[i].data = (char *)__pa(f->blocks[i].data);
+                       f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
                        image_size += f->blocks[i].length;
+                       f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
                }
                next = f->next;
                /* Don't translate NULL pointer for last entry */
                if (f->next)
-                       f->next = (struct flash_block_list *)__pa(f->next);
+                       f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
                else
                        f->next = NULL;
                /* make num_blocks into the version/length field */
                f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
+               f->num_blocks = cpu_to_be64(f->num_blocks);
        }
 
        printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
index 6b0641c3f03f097c4aef760c9aa5fa307d2bcd5b..fe52db2eea6a35c4f1810e411beb6b9aeba4e703 100644 (file)
@@ -1307,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
  out_enable:
        pmao_restore_workaround(ebb);
 
+       if (ppmu->flags & PPMU_ARCH_207S)
+               mtspr(SPRN_MMCR2, 0);
+
        mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
 
        mb();
@@ -1315,9 +1318,6 @@ static void power_pmu_enable(struct pmu *pmu)
 
        write_mmcr0(cpuhw, mmcr0);
 
-       if (ppmu->flags & PPMU_ARCH_207S)
-               mtspr(SPRN_MMCR2, 0);
-
        /*
         * Enable instruction sampling if necessary
         */
index 10268c41d8302dd39ed73f4e0ad98dd6deb5e25f..0ad533b617f7888e5c45c27fd7ad4e7671e80f8c 100644 (file)
@@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
 
        rc = opal_get_elog_size(&id, &size, &type);
        if (rc != OPAL_SUCCESS) {
-               pr_err("ELOG: Opal log read failed\n");
+               pr_err("ELOG: OPAL log info read failed\n");
                return;
        }
 
@@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
        log_id = be64_to_cpu(id);
        elog_type = be64_to_cpu(type);
 
-       BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
+       WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
        if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
                elog_size  =  OPAL_MAX_ERRLOG_SIZE;
index bba3cf88e6249187a00fba403ed533648a728673..0a8b519226b8feb37368ffbc4ca81011bc031fde 100644 (file)
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
 
 #define PARAVIRT_ADJUST_EXCEPTION_FRAME        /*  */
 
-#define INTERRUPT_RETURN       iretq
+#define INTERRUPT_RETURN       jmp native_iret
 #define USERGS_SYSRET64                                \
        swapgs;                                 \
        sysretq;
index a80029035bf2ae6acc5b4958df0dae905308d50b..f9e4fdd3b87736044840678a9fcf9afd6fc71b2f 100644 (file)
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
         */
        detect_extended_topology(c);
 
+       if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+               /*
+                * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+                * detection.
+                */
+               c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+               detect_ht(c);
+#endif
+       }
+
        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-       if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-               /*
-                * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-                * detection.
-                */
-               c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-               detect_ht(c);
-#endif
-       }
-
        /* Work around errata */
        srat_detect_node(c);
 
index a952e9c85b6fad81684c4bd39418bd5623a634ff..9c8f7394c612e7fa74d0ce356caa6d526dc02b0f 100644 (file)
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
        }
 
+#ifdef CONFIG_X86_HT
+       /*
+        * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+        * turns means that the only possibility is SMT (as indicated in
+        * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+        * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+        * c->phys_proc_id.
+        */
+       if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+               per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
        return l2;
index bb92f38153b210a05fc16520e4b0bc44e828537d..9a79c8dbd8e87956374829d7a390b35e9330287d 100644 (file)
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
        for_each_online_cpu(i) {
                err = mce_device_create(i);
                if (err) {
+                       /*
+                        * Register notifier anyway (and do not unreg it) so
+                        * that we don't leave undeleted timers, see notifier
+                        * callback above.
+                        */
+                       __register_hotcpu_notifier(&mce_cpu_notifier);
                        cpu_notifier_register_done();
                        goto err_device_create;
                }
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
 err_register:
        unregister_syscore_ops(&mce_syscore_ops);
 
-       cpu_notifier_register_begin();
-       __unregister_hotcpu_notifier(&mce_cpu_notifier);
-       cpu_notifier_register_done();
-
 err_device_create:
        /*
         * We didn't keep track of which devices were created above, but
index 2bdfbff8a4f6165afb1e9931edcfb250c7113a34..2879ecdaac430c62710db3c90b326e6e25f60acf 100644 (file)
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;
+               /* Check if the extra msrs can be safely accessed*/
+               if (!er->extra_msr_access)
+                       return -ENXIO;
 
                reg->idx = er->idx;
                reg->config = event->attr.config1;
index 3b2f9bdd974be198d0622e306ddbba427add2d25..8ade93111e0379fd79e0b421d29256b6c9b1b206 100644 (file)
@@ -295,14 +295,16 @@ struct extra_reg {
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
+       bool                    extra_msr_access;
 };
 
 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {     \
-       .event = (e),           \
-       .msr = (ms),            \
-       .config_mask = (m),     \
-       .valid_mask = (vm),     \
-       .idx = EXTRA_REG_##i,   \
+       .event = (e),                   \
+       .msr = (ms),                    \
+       .config_mask = (m),             \
+       .valid_mask = (vm),             \
+       .idx = EXTRA_REG_##i,           \
+       .extra_msr_access = true,       \
        }
 
 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)     \
index 07846d738bdb06dd42b44c3294359bb16e07cd48..2502d0d9d246a1fe63c57070176b2bcc04cad3bb 100644 (file)
@@ -2182,6 +2182,41 @@ static void intel_snb_check_microcode(void)
        }
 }
 
+/*
+ * Under certain circumstances, access certain MSR may cause #GP.
+ * The function tests if the input MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+       u64 val_old, val_new, val_tmp;
+
+       /*
+        * Read the current value, change it and read it back to see if it
+        * matches, this is needed to detect certain hardware emulators
+        * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+        */
+       if (rdmsrl_safe(msr, &val_old))
+               return false;
+
+       /*
+        * Only change the bits which can be updated by wrmsrl.
+        */
+       val_tmp = val_old ^ mask;
+       if (wrmsrl_safe(msr, val_tmp) ||
+           rdmsrl_safe(msr, &val_new))
+               return false;
+
+       if (val_new != val_tmp)
+               return false;
+
+       /* Here it's sure that the MSR can be safely accessed.
+        * Restore the old value and return.
+        */
+       wrmsrl(msr, val_old);
+
+       return true;
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
        x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2271,7 +2306,8 @@ __init int intel_pmu_init(void)
        union cpuid10_ebx ebx;
        struct event_constraint *c;
        unsigned int unused;
-       int version;
+       struct extra_reg *er;
+       int version, i;
 
        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
@@ -2474,6 +2510,9 @@ __init int intel_pmu_init(void)
        case 62: /* IvyBridge EP */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
+               /* dTLB-load-misses on IVB is different than SNB */
+               hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));
 
@@ -2574,6 +2613,34 @@ __init int intel_pmu_init(void)
                }
        }
 
+       /*
+        * Access LBR MSR may cause #GP under certain circumstances.
+        * E.g. KVM doesn't support LBR MSR
+        * Check all LBT MSR here.
+        * Disable LBR access if any LBR MSRs can not be accessed.
+        */
+       if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+               x86_pmu.lbr_nr = 0;
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+                     check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+                       x86_pmu.lbr_nr = 0;
+       }
+
+       /*
+        * Access extra MSR may cause #GP under certain circumstances.
+        * E.g. KVM doesn't support offcore event
+        * Check all extra_regs here.
+        */
+       if (x86_pmu.extra_regs) {
+               for (er = x86_pmu.extra_regs; er->msr; er++) {
+                       er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+                       /* Disable LBR select mapping */
+                       if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+                               x86_pmu.lbr_sel_map = NULL;
+               }
+       }
+
        /* Support full width counters using alternative MSR range */
        if (x86_pmu.intel_cap.full_width_write) {
                x86_pmu.max_period = x86_pmu.cntval_mask;
index 980970cb744db6793a0f355f092db0e4a17b6ec5..696ade311ded7d01103323d159cff621b46d4a89 100644 (file)
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
        if (!x86_pmu.bts)
                return 0;
 
-       buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
-       if (unlikely(!buffer))
+       buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+       if (unlikely(!buffer)) {
+               WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
                return -ENOMEM;
+       }
 
        max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
        thresh = max / 16;
index 65bbbea38b9c9c0f7246a3c4fee4176dd529647b..ae6552a0701f25330bd0e7027e5767ac32cdfdcd 100644 (file)
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+
        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
index dbaa23e78b369721d5082af26cc18fa698985d09..0d0c9d4ab6d5b0d9cc1009ae03aebff22c1e8585 100644 (file)
@@ -425,8 +425,8 @@ sysenter_do_call:
        cmpl $(NR_syscalls), %eax
        jae sysenter_badsys
        call *sys_call_table(,%eax,4)
-       movl %eax,PT_EAX(%esp)
 sysenter_after_call:
+       movl %eax,PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
@@ -502,6 +502,7 @@ ENTRY(system_call)
        jae syscall_badsys
 syscall_call:
        call *sys_call_table(,%eax,4)
+syscall_after_call:
        movl %eax,PT_EAX(%esp)          # store the return value
 syscall_exit:
        LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@ syscall_fault:
 END(syscall_fault)
 
 syscall_badsys:
-       movl $-ENOSYS,PT_EAX(%esp)
-       jmp syscall_exit
+       movl $-ENOSYS,%eax
+       jmp syscall_after_call
 END(syscall_badsys)
 
 sysenter_badsys:
-       movl $-ENOSYS,PT_EAX(%esp)
+       movl $-ENOSYS,%eax
        jmp sysenter_after_call
 END(syscall_badsys)
        CFI_ENDPROC
index b25ca969edd27eca18210b013508d640775c6613..c844f0816ab8b383615fe9908010be92a7380d16 100644 (file)
@@ -830,27 +830,24 @@ restore_args:
        RESTORE_ARGS 1,8,1
 
 irq_return:
+       INTERRUPT_RETURN
+
+ENTRY(native_iret)
        /*
         * Are we returning to a stack segment from the LDT?  Note: in
         * 64-bit mode SS:RSP on the exception stack is always valid.
         */
 #ifdef CONFIG_X86_ESPFIX64
        testb $4,(SS-RIP)(%rsp)
-       jnz irq_return_ldt
+       jnz native_irq_return_ldt
 #endif
 
-irq_return_iret:
-       INTERRUPT_RETURN
-       _ASM_EXTABLE(irq_return_iret, bad_iret)
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+native_irq_return_iret:
        iretq
-       _ASM_EXTABLE(native_iret, bad_iret)
-#endif
+       _ASM_EXTABLE(native_irq_return_iret, bad_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
-irq_return_ldt:
+native_irq_return_ldt:
        pushq_cfi %rax
        pushq_cfi %rdi
        SWAPGS
@@ -872,7 +869,7 @@ irq_return_ldt:
        SWAPGS
        movq %rax,%rsp
        popq_cfi %rax
-       jmp irq_return_iret
+       jmp native_irq_return_iret
 #endif
 
        .section .fixup,"ax"
@@ -956,13 +953,8 @@ __do_double_fault:
        cmpl $__KERNEL_CS,CS(%rdi)
        jne do_double_fault
        movq RIP(%rdi),%rax
-       cmpq $irq_return_iret,%rax
-#ifdef CONFIG_PARAVIRT
-       je 1f
-       cmpq $native_iret,%rax
-#endif
+       cmpq $native_irq_return_iret,%rax
        jne do_double_fault             /* This shouldn't happen... */
-1:
        movq PER_CPU_VAR(kernel_stack),%rax
        subq $(6*8-KERNEL_STACK_OFFSET),%rax    /* Reset to original stack */
        movq %rax,RSP(%rdi)
@@ -1428,7 +1420,7 @@ error_sti:
  */
 error_kernelspace:
        incl %ebx
-       leaq irq_return_iret(%rip),%rcx
+       leaq native_irq_return_iret(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
        movl %ecx,%eax  /* zero extend */
index 7596df664901eed5a7aea5003ab83da49d34a615..67e6d19ef1be65e49a176a1c3bbd43cab654e7f5 100644 (file)
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
 
+       if (user_mode_vm(regs))
+               return 0;
+
        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
        /*
         * We don't want to be preempted for the entire
index 3f08f34f93ebc480041d0b448e3b12869450a91d..a1da6737ba5b80c4ee636204d49d4813348ef903 100644 (file)
@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_irq_ops, save_fl);
                PATCH_SITE(pv_irq_ops, irq_enable);
                PATCH_SITE(pv_irq_ops, irq_disable);
-               PATCH_SITE(pv_cpu_ops, iret);
                PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
                PATCH_SITE(pv_cpu_ops, usergs_sysret32);
                PATCH_SITE(pv_cpu_ops, usergs_sysret64);
index c985835885802a69ef0b255e36c81484026ab227..ebfa9b2c871db56a2a6a631e5502acdf96e2032e 100644 (file)
 
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include <xen/interface/xen.h>
 #include <xen/page.h>
 #include <xen/grant_table.h>
+#include <xen/xen.h>
 
 #include <asm/pgtable.h>
 
-static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-                     unsigned long addr, void *data)
+static struct gnttab_vm_area {
+       struct vm_struct *area;
+       pte_t **ptes;
+} gnttab_shared_vm_area, gnttab_status_vm_area;
+
+int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          void **__shared)
 {
-       unsigned long **frames = (unsigned long **)data;
+       void *shared = *__shared;
+       unsigned long addr;
+       unsigned long i;
 
-       set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-       (*frames)++;
-       return 0;
-}
+       if (shared == NULL)
+               *__shared = shared = gnttab_shared_vm_area.area->addr;
 
-/*
- * This function is used to map shared frames to store grant status. It is
- * different from map_pte_fn above, the frames type here is uint64_t.
- */
-static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
-                            unsigned long addr, void *data)
-{
-       uint64_t **frames = (uint64_t **)data;
+       addr = (unsigned long)shared;
+
+       for (i = 0; i < nr_gframes; i++) {
+               set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
+                          mfn_pte(frames[i], PAGE_KERNEL));
+               addr += PAGE_SIZE;
+       }
 
-       set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-       (*frames)++;
        return 0;
 }
 
-static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-                       unsigned long addr, void *data)
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          grant_status_t **__shared)
 {
+       grant_status_t *shared = *__shared;
+       unsigned long addr;
+       unsigned long i;
+
+       if (shared == NULL)
+               *__shared = shared = gnttab_status_vm_area.area->addr;
+
+       addr = (unsigned long)shared;
+
+       for (i = 0; i < nr_gframes; i++) {
+               set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
+                          mfn_pte(frames[i], PAGE_KERNEL));
+               addr += PAGE_SIZE;
+       }
 
-       set_pte_at(&init_mm, addr, pte, __pte(0));
        return 0;
 }
 
-int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
-                          unsigned long max_nr_gframes,
-                          void **__shared)
+void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 {
-       int rc;
-       void *shared = *__shared;
+       pte_t **ptes;
+       unsigned long addr;
+       unsigned long i;
 
-       if (shared == NULL) {
-               struct vm_struct *area =
-                       alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-               BUG_ON(area == NULL);
-               shared = area->addr;
-               *__shared = shared;
-       }
+       if (shared == gnttab_status_vm_area.area->addr)
+               ptes = gnttab_status_vm_area.ptes;
+       else
+               ptes = gnttab_shared_vm_area.ptes;
 
-       rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-                                PAGE_SIZE * nr_gframes,
-                                map_pte_fn, &frames);
-       return rc;
+       addr = (unsigned long)shared;
+
+       for (i = 0; i < nr_gframes; i++) {
+               set_pte_at(&init_mm, addr, ptes[i], __pte(0));
+               addr += PAGE_SIZE;
+       }
 }
 
-int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
-                          unsigned long max_nr_gframes,
-                          grant_status_t **__shared)
+static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
 {
-       int rc;
-       grant_status_t *shared = *__shared;
+       area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+       if (area->ptes == NULL)
+               return -ENOMEM;
 
-       if (shared == NULL) {
-               /* No need to pass in PTE as we are going to do it
-                * in apply_to_page_range anyhow. */
-               struct vm_struct *area =
-                       alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-               BUG_ON(area == NULL);
-               shared = area->addr;
-               *__shared = shared;
+       area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
+       if (area->area == NULL) {
+               kfree(area->ptes);
+               return -ENOMEM;
        }
 
-       rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-                                PAGE_SIZE * nr_gframes,
-                                map_pte_fn_status, &frames);
-       return rc;
+       return 0;
 }
 
-void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
+static void arch_gnttab_vfree(struct gnttab_vm_area *area)
+{
+       free_vm_area(area->area);
+       kfree(area->ptes);
+}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
 {
-       apply_to_page_range(&init_mm, (unsigned long)shared,
-                           PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
+       int ret;
+
+       if (!xen_pv_domain())
+               return 0;
+
+       ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Always allocate the space for the status frames in case
+        * we're migrated to a host with V2 support.
+        */
+       ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
+       if (ret < 0)
+               goto err;
+
+       return 0;
+  err:
+       arch_gnttab_vfree(&gnttab_shared_vm_area);
+       return -ENOMEM;
 }
+
 #ifdef CONFIG_XEN_PVH
 #include <xen/balloon.h>
 #include <xen/events.h>
-#include <xen/xen.h>
 #include <linux/slab.h>
 static int __init xlated_setup_gnttab_pages(void)
 {
index 966f893711b393a3f4a7f22252b0d1cf6eb8dc98..6a3ad801158531435848a40936a9bd74a7a522e6 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/rwsem.h>
+#include <linux/security.h>
 
 struct alg_type_list {
        const struct af_alg_type *type;
@@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
 
        sock_init_data(newsock, sk2);
        sock_graft(sk2, newsock);
+       security_sk_clone(sk, sk2);
 
        err = type->accept(ask->private, sk2);
        if (err) {
index e1581335937d274dfddfd502cd5c4c896b305267..cb8e6f14e880ec62c5267b8dd09797ec5bdb7356 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
-#define DRA7_DPLL_ABE_DEFFREQ                          361267200
+#define DRA7_DPLL_ABE_DEFFREQ                          180633600
 #define DRA7_DPLL_GMAC_DEFFREQ                         1000000000
 
 
@@ -322,6 +322,11 @@ int __init dra7xx_dt_clk_init(void)
        if (rc)
                pr_err("%s: failed to configure ABE DPLL!\n", __func__);
 
+       dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
+       rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
+       if (rc)
+               pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
+
        dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
        rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
        if (rc)
index 57985410f12f7ea9770818457e2d9550d815a3dd..a66a3217f1d92f939fa4f6f2b1ee7a7ee976488f 100644 (file)
@@ -336,10 +336,10 @@ static const struct {
                QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
-               QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+               QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
-               0},
+               QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
index f36126383d260166a950ad20c72b69637246c550..d893e4da5dcef9cc0c30d873f6f3d6033a27a03d 100644 (file)
@@ -1616,22 +1616,6 @@ out:
        return ret;
 }
 
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
-       struct i915_vma *vma;
-
-       /*
-        * Only the global gtt is relevant for gtt memory mappings, so restrict
-        * list traversal to objects bound into the global address space. Note
-        * that the active list should be empty, but better safe than sorry.
-        */
-       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
-       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-}
-
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        obj->fault_mappable = false;
 }
 
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+               i915_gem_release_mmap(obj);
+}
+
 uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
index 3521f998a1788488b8c396860f6433ffacb81ae2..34894b57306401645a2184d48d99ed2966c8c9f1 100644 (file)
@@ -31,7 +31,7 @@
 struct i915_render_state {
        struct drm_i915_gem_object *obj;
        unsigned long ggtt_offset;
-       void *batch;
+       u32 *batch;
        u32 size;
        u32 len;
 };
@@ -80,7 +80,7 @@ free:
 
 static void render_state_free(struct i915_render_state *so)
 {
-       kunmap(so->batch);
+       kunmap(kmap_to_page(so->batch));
        i915_gem_object_ggtt_unpin(so->obj);
        drm_gem_object_unreference(&so->obj->base);
        kfree(so);
index 267f069765adedffb942d1352649502ac624a364..c05c84f3f091382729b5cfcfa867471faed06a83 100644 (file)
@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
-       u32 seqno, ctl;
+       u32 seqno;
 
        ring->hangcheck.deadlock++;
 
@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
        if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
                return -1;
 
-       /* cursory check for an unkickable deadlock */
-       ctl = I915_READ_CTL(signaller);
-       if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
-               return -1;
-
        if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
                return 1;
 
-       if (signaller->hangcheck.deadlock)
+       /* cursory check for an unkickable deadlock */
+       if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+           semaphore_passed(signaller) < 0)
                return -1;
 
        return 0;
index 0b2471107137a90af6677ece89a7351c9b97f599..c0ea66192fe03a8fe977ac0aea977a07204530c4 100644 (file)
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
                                gb_tile_moden = 0;
                                break;
                        }
+                       rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
        } else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index 250bac3935a43954c4fff467cdd7da694e1d57c3..15e4f28015e1e74fcc0a3e6189feee1274fa1794 100644 (file)
@@ -4756,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index c66952d4b00cc5d706a13650fe2d82ef71389f1d..3c69f58e46efd94b02a440e15e8aad66f9241712 100644 (file)
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index b7204500a9a6970f863c09fe82aa495975b4092a..60c47f8291222369f7f6070953eea7bda1a25ce9 100644 (file)
@@ -449,6 +449,7 @@ struct radeon_bo_va {
 
        /* protected by vm mutex */
        struct list_head                vm_list;
+       struct list_head                vm_status;
 
        /* constant after initialization */
        struct radeon_vm                *vm;
@@ -867,6 +868,9 @@ struct radeon_vm {
        struct list_head                va;
        unsigned                        id;
 
+       /* BOs freed, but not yet updated in the PT */
+       struct list_head                freed;
+
        /* contains the page directory */
        struct radeon_bo                *page_directory;
        uint64_t                        pd_gpu_addr;
@@ -875,6 +879,8 @@ struct radeon_vm {
        /* array of page tables, one for each page directory entry */
        struct radeon_vm_pt             *page_tables;
 
+       struct radeon_bo_va             *ib_bo_va;
+
        struct mutex                    mutex;
        /* last fence for cs using this vm */
        struct radeon_fence             *fence;
@@ -2832,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
 int radeon_vm_update_page_directory(struct radeon_device *rdev,
                                    struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem);
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo);
@@ -2847,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
                          uint64_t offset,
                          uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va);
 
 /* audio */
 void r600_audio_update_hdmi(struct work_struct *work);
index 71a143461478a60f5d9bc8428a0276c38b78953b..ae763f60c8a0a23f551bffb5119770d94527931a 100644 (file)
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
 {
        struct radeon_device *rdev = p->rdev;
+       struct radeon_bo_va *bo_va;
        int i, r;
 
        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;
 
-       r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+       r = radeon_vm_clear_freed(rdev, vm);
+       if (r)
+               return r;
+
+       if (vm->ib_bo_va == NULL) {
+               DRM_ERROR("Tmp BO not in VM!\n");
+               return -EINVAL;
+       }
+
+       r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                &rdev->ring_tmp_bo.bo->tbo.mem);
        if (r)
                return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                        continue;
 
                bo = p->relocs[i].robj;
-               r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+               bo_va = radeon_vm_bo_find(vm, bo);
+               if (bo_va == NULL) {
+                       dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+                       return -EINVAL;
+               }
+
+               r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
        }
index 03686fab842d3c2e0de237fa6fb027895a025a80..697add2cd4e34e89b6276659142f476ccf0c1e64 100644 (file)
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
        if (!radeon_check_pot_argument(radeon_vm_size)) {
                dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
-       if (radeon_vm_size < 4) {
-               dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+       if (radeon_vm_size < 1) {
+               dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /*
         * Max GPUVM size for Cayman, SI and CI are 40 bits.
         */
-       if (radeon_vm_size > 1024*1024) {
-               dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+       if (radeon_vm_size > 1024) {
+               dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (radeon_vm_block_size < 9) {
-               dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+               dev_warn(rdev->dev, "VM page table size (%d) too small\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
 
        if (radeon_vm_block_size > 24 ||
-           radeon_vm_size < (1ull << radeon_vm_block_size)) {
-               dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+           (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+               dev_warn(rdev->dev, "VM page table size (%d) too large\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
        /* Adjust VM size here.
         * Max GPUVM size for cayman+ is 40 bits.
         */
-       rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+       rdev->vm_manager.max_pfn = radeon_vm_size << 18;
 
        /* Set asic functions */
        r = radeon_asic_init(rdev);
index cb1421369e3a2eaa111a8c6d5f3b1e248e72beec..e9e361084249c7deddd0dd680ced4d95a845d31d 100644 (file)
@@ -173,7 +173,7 @@ int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
 int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
 int radeon_vm_block_size = 9;
 int radeon_deep_color = 0;
 
@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
 MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
 module_param_named(hard_reset, radeon_hard_reset, int, 0444);
 
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
 module_param_named(vm_size, radeon_vm_size, int, 0444);
 
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
index 35d931881b4b80bb5f6989580e993af8796e7041..d25ae6acfd5a05d3bc3ab9775cc3dc604f27554a 100644 (file)
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN) {
                struct radeon_fpriv *fpriv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm;
                int r;
 
                fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return -ENOMEM;
                }
 
-               r = radeon_vm_init(rdev, &fpriv->vm);
+               vm = &fpriv->vm;
+               r = radeon_vm_init(rdev, vm);
                if (r) {
                        kfree(fpriv);
                        return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
 
                        /* map the ib pool buffer read only into
                         * virtual address space */
-                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                                rdev->ring_tmp_bo.bo);
-                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                       vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+                                                       rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+                                                 RADEON_VA_IB_OFFSET,
                                                  RADEON_VM_PAGE_READABLE |
                                                  RADEON_VM_PAGE_SNOOPED);
 
                        radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
                struct radeon_fpriv *fpriv = file_priv->driver_priv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm = &fpriv->vm;
                int r;
 
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (!r) {
-                               bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                         rdev->ring_tmp_bo.bo);
-                               if (bo_va)
-                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               if (vm->ib_bo_va)
+                                       radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
                                radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        }
                }
 
-               radeon_vm_fini(rdev, &fpriv->vm);
+               radeon_vm_fini(rdev, vm);
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
index eecff6bbd34145c6bf975ca0f2f19ff9af8534ba..725d3669014f8ef2bbd61f637d5e3e98de2772eb 100644 (file)
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
        bo_va->ref_count = 1;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->vm_list);
+       INIT_LIST_HEAD(&bo_va->vm_status);
 
        mutex_lock(&vm->mutex);
        list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                head = &tmp->vm_list;
        }
 
+       if (bo_va->soffset) {
+               /* add a clone of the bo_va to clear the old address */
+               tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+               if (!tmp) {
+                       mutex_unlock(&vm->mutex);
+                       return -ENOMEM;
+               }
+               tmp->soffset = bo_va->soffset;
+               tmp->eoffset = bo_va->eoffset;
+               tmp->vm = vm;
+               list_add(&tmp->vm_status, &vm->freed);
+       }
+
        bo_va->soffset = soffset;
        bo_va->eoffset = eoffset;
        bo_va->flags = flags;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
  * Object have to be reserved and mutex must be locked!
  */
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem)
 {
+       struct radeon_vm *vm = bo_va->vm;
        struct radeon_ib ib;
-       struct radeon_bo_va *bo_va;
        unsigned nptes, ndw;
        uint64_t addr;
        int r;
 
-       bo_va = radeon_vm_bo_find(vm, bo);
-       if (bo_va == NULL) {
-               dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
-               return -EINVAL;
-       }
 
        if (!bo_va->soffset) {
                dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
-                       bo, vm);
+                       bo_va->bo, vm);
                return -EINVAL;
        }
 
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 
        trace_radeon_vm_bo_update(bo_va);
 
-       nptes = radeon_bo_ngpu_pages(bo);
+       nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
 
        /* padding, etc. */
        ndw = 64;
@@ -910,6 +918,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        return 0;
 }
 
+/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       int r;
+
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+               list_del(&bo_va->vm_status);
+               r = radeon_vm_bo_update(rdev, bo_va, NULL);
+               kfree(bo_va);
+               if (r)
+                       return r;
+       }
+       return 0;
+
+}
+
 /**
  * radeon_vm_bo_rmv - remove a bo to a specific vm
  *
@@ -917,27 +953,27 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
  * @bo_va: requested bo_va
  *
  * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
  *
  * Object have to be reserved!
  */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va)
 {
-       int r = 0;
+       struct radeon_vm *vm = bo_va->vm;
 
-       mutex_lock(&bo_va->vm->mutex);
-       if (bo_va->soffset)
-               r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+       list_del(&bo_va->bo_list);
 
+       mutex_lock(&vm->mutex);
        list_del(&bo_va->vm_list);
-       mutex_unlock(&bo_va->vm->mutex);
-       list_del(&bo_va->bo_list);
 
-       kfree(bo_va);
-       return r;
+       if (bo_va->soffset) {
+               bo_va->bo = NULL;
+               list_add(&bo_va->vm_status, &vm->freed);
+       } else {
+               kfree(bo_va);
+       }
+
+       mutex_unlock(&vm->mutex);
 }
 
 /**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        int r;
 
        vm->id = 0;
+       vm->ib_bo_va = NULL;
        vm->fence = NULL;
        vm->last_flush = NULL;
        vm->last_id_use = NULL;
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->va);
+       INIT_LIST_HEAD(&vm->freed);
 
        pd_size = radeon_vm_directory_size(rdev);
        pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
                        kfree(bo_va);
                }
        }
-
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+               kfree(bo_va);
 
        for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
                radeon_bo_unref(&vm->page_tables[i].bo);
index eba0225259a457a3d494f39bfee13af0cdc865e6..9e854fd016dabac99c081896209ce9f732dc34f0 100644 (file)
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index 20da6ff183df9b6fb5b59554616a30f28aeeb96f..32e50be9c4ac1c41969fd9de2d1a6a3439a6ef27 100644 (file)
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;
 
-       /* There are stability issues reported on latops with
-        * bapm installed when switching between AC and battery
-        * power.  At the same time, some desktop boards hang
-        * if it's not enabled and dpm is enabled.
+       /* There are stability issues reported on with
+        * bapm enabled when switching between AC and battery
+        * power.  At the same time, some MSI boards hang
+        * if it's not enabled and dpm is enabled.  Just enable
+        * it for MSI boards right now.
         */
-       if (rdev->flags & RADEON_IS_MOBILITY)
-               pi->enable_bapm = false;
-       else
+       if (rdev->pdev->subsystem_vendor == 0x1462)
                pi->enable_bapm = true;
+       else
+               pi->enable_bapm = false;
        pi->enable_nbps_policy = true;
        pi->enable_sclk_ds = true;
        pi->enable_gfx_power_gating = true;
index efee4c59239fcff8aa7b675c01cb5b9ac6bab5bd..34b9a601ad078c394513e67a5dccc345c6c02fef 100644 (file)
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-       return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+       return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
 }
 
 static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
+       if (val > 255)
+               return -EINVAL;
 
        data->vrm = val;
        return count;
index a7e68c81f89dd7203d4e2cc91f1872015801baf0..a077cc86421b5eb24519ab25a61ebb41105af5e8 100644 (file)
 /* Defaults values */
 #define BMA180_DEF_PMODE       0
 #define BMA180_DEF_BW          20
-#define BMA180_DEF_SCALE       250
+#define BMA180_DEF_SCALE       2452
 
 /* Available values for sysfs */
 #define BMA180_FLP_FREQ_AVAILABLE \
        "10 20 40 75 150 300"
 #define BMA180_SCALE_AVAILABLE \
-       "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
+       "0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
 
 struct bma180_data {
        struct i2c_client *client;
@@ -94,7 +94,7 @@ enum bma180_axis {
 };
 
 static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
-static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
+static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
 
 static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
 {
@@ -376,6 +376,8 @@ static int bma180_write_raw(struct iio_dev *indio_dev,
                mutex_unlock(&data->mutex);
                return ret;
        case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+               if (val2)
+                       return -EINVAL;
                mutex_lock(&data->mutex);
                ret = bma180_set_bw(data, val);
                mutex_unlock(&data->mutex);
index 36b1ae92e2392b5e8b59032fe9df02a8ee2aebf4..9f1a14009901e84f2015b56c5bc75a44b06d9ae3 100644 (file)
@@ -966,7 +966,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
 
        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
-                        indio_dev->active_scan_mask,
+                        buffer->scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
index c44950d3eb7b48f8a81fd16db9e46ecd15343a39..b7ae0a0dd5b6814c61332d3f56b4b7626754ddea 100644 (file)
@@ -2400,6 +2400,7 @@ allocerr:
 error:
        freeurbs(cs);
        usb_set_intfdata(interface, NULL);
+       usb_put_dev(udev);
        gigaset_freecs(cs);
        return rc;
 }
index 4e84095833dbce97972318a59841a75db0f2b241..d724459860d9a54eb1934b8762504f6fcdbb437e 100644 (file)
@@ -1541,7 +1541,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
        BUG_ON(block_size < 1 << SECTOR_SHIFT ||
               (block_size & (block_size - 1)));
 
-       c = kmalloc(sizeof(*c), GFP_KERNEL);
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c) {
                r = -ENOMEM;
                goto bad_client;
index 5f054c44b485f27dec03bb5bed4c13e95dff18db..2c63326638b6d4d54af4499643ac10dd9d8ee33b 100644 (file)
@@ -231,7 +231,7 @@ struct cache {
        /*
         * cache_size entries, dirty if set
         */
-       dm_cblock_t nr_dirty;
+       atomic_t nr_dirty;
        unsigned long *dirty_bitset;
 
        /*
@@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
 {
        if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
-               cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+               atomic_inc(&cache->nr_dirty);
                policy_set_dirty(cache->policy, oblock);
        }
 }
@@ -501,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
 {
        if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
                policy_clear_dirty(cache->policy, oblock);
-               cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
-               if (!from_cblock(cache->nr_dirty))
+               if (atomic_dec_return(&cache->nr_dirty) == 0)
                        dm_table_event(cache->ti->table);
        }
 }
@@ -2269,7 +2268,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        atomic_set(&cache->quiescing_ack, 0);
 
        r = -ENOMEM;
-       cache->nr_dirty = 0;
+       atomic_set(&cache->nr_dirty, 0);
        cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
        if (!cache->dirty_bitset) {
                *error = "could not allocate dirty bitset";
@@ -2808,7 +2807,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 
                residency = policy_residency(cache->policy);
 
-               DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
+               DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
                       (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
                       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
                       (unsigned long long)nr_blocks_metadata,
@@ -2821,7 +2820,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
                       (unsigned) atomic_read(&cache->stats.write_miss),
                       (unsigned) atomic_read(&cache->stats.demotion),
                       (unsigned) atomic_read(&cache->stats.promotion),
-                      (unsigned long long) from_cblock(cache->nr_dirty));
+                      (unsigned long) atomic_read(&cache->nr_dirty));
 
                if (writethrough_mode(&cache->features))
                        DMEMIT("1 writethrough ");
index 824108cd9fd594a91c25b0b4a1d43d3341ad9a31..12430be6448acd86d4fa530cab24f6deb3a462a8 100644 (file)
@@ -287,7 +287,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
                        break;
                }
 
-               priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+               priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
+                                                    resource_size(res));
                if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
                        dev_info(&pdev->dev, "control memory is not used for raminit\n");
                else
index c83584a26713e6633675a9e82a9ab251d9dce1c0..5a1891faba8a1f9df9090c3c1064af74cb5c2136 100644 (file)
@@ -339,7 +339,8 @@ static int xgbe_probe(struct platform_device *pdev)
        /* Calculate the number of Tx and Rx rings to be created */
        pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
                                     pdata->hw_feat.tx_ch_cnt);
-       if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) {
+       ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+       if (ret) {
                dev_err(dev, "error setting real tx queue count\n");
                goto err_io;
        }
index 4cab09d3f80729a2bd843cf6b0b6490f0a8e95ad..8206a293e6b41be5436e0b90753cf9406e034702 100644 (file)
@@ -346,6 +346,7 @@ struct sw_tx_bd {
        u8              flags;
 /* Set on the first BD descriptor when there is a split BD */
 #define BNX2X_TSO_SPLIT_BD             (1<<0)
+#define BNX2X_HAS_SECOND_PBD           (1<<1)
 };
 
 struct sw_rx_page {
index 4b875da1c7ed2afc0eec3854217b4919797f3e20..c43e7238de217eccc0745df007e8dfb9e1c4b2aa 100644 (file)
@@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
+       if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+               /* Skip second parse bd... */
+               --nbd;
+               bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+       }
+
        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -3889,6 +3895,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        /* set encapsulation flag in start BD */
                        SET_FLAG(tx_start_bd->general_data,
                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+                       tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
                        nbd++;
                } else if (xmit_type & XMIT_CSUM) {
                        /* Set PBD in checksum offload case w/o encapsulation */
index bd0600cf72660f3fbc5ac9b2ad37b481fa0969a0..25eddd90f48234e86991e8c6282ab11c047de76e 100644 (file)
@@ -379,6 +379,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        break;
                case PORT_FIBRE:
                case PORT_DA:
+               case PORT_NONE:
                        if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
                              bp->port.supported[1] & SUPPORTED_FIBRE)) {
                                DP(BNX2X_MSG_ETHTOOL,
index 16281ad2da12c04ee8324ec85835b541479788c5..4e615debe4729e2b103575fff027a2a491c661dd 100644 (file)
@@ -1149,6 +1149,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
                goto out;
        }
 
+       if (skb_padto(skb, ETH_ZLEN)) {
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+
        /* set the SKB transmit checksum */
        if (priv->desc_64b_en) {
                ret = bcmgenet_put_tx_csum(dev, skb);
index fd411d6e19a2f4e4d37675f1d27bd011584b8fa0..d813bfb1a847b527d27db6709eac8baac96a2513 100644 (file)
@@ -610,6 +610,13 @@ static int __vnet_tx_trigger(struct vnet_port *port)
        return err;
 }
 
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+       struct vio_driver_state *vio = &vnet->vio;
+
+       return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
 struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
        unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
        struct vnet_port *port;
 
        hlist_for_each_entry(port, hp, hash) {
+               if (!port_is_up(port))
+                       continue;
                if (ether_addr_equal(port->raddr, skb->data))
                        return port;
        }
-       port = NULL;
-       if (!list_empty(&vp->port_list))
-               port = list_entry(vp->port_list.next, struct vnet_port, list);
-
-       return port;
+       list_for_each_entry(port, &vp->port_list, list) {
+               if (!port->switch_port)
+                       continue;
+               if (!port_is_up(port))
+                       continue;
+               return port;
+       }
+       return NULL;
 }
 
 struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
index 4ed38eaecea805b72349f64b8dede105b5a4fcf4..d97d5f39a04e4a307bb1e5b53eaab18a85b6b828 100644 (file)
@@ -378,8 +378,10 @@ static int netvsc_init_buf(struct hv_device *device)
 
        net_device->send_section_map =
                kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
-       if (net_device->send_section_map == NULL)
+       if (net_device->send_section_map == NULL) {
+               ret = -ENOMEM;
                goto cleanup;
+       }
 
        goto exit;
 
index 4eaadcfcb0fe5ed2d5bd82a4632989916ade90e2..203651ebccb0fa21e419a2956188658344ab9dab 100644 (file)
@@ -255,6 +255,7 @@ int mdiobus_register(struct mii_bus *bus)
 
        bus->dev.parent = bus->parent;
        bus->dev.class = &mdio_bus_class;
+       bus->dev.driver = bus->parent->driver;
        bus->dev.groups = NULL;
        dev_set_name(&bus->dev, "%s", bus->id);
 
index 35d753d22f78b91d643548029df5b2e6eea64d49..22c57be4dfa0544199a516a00bbe16bf291c9b26 100644 (file)
@@ -355,7 +355,7 @@ int phy_device_register(struct phy_device *phydev)
        phydev->bus->phy_map[phydev->addr] = phydev;
 
        /* Run all of the fixups for this PHY */
-       err = phy_init_hw(phydev);
+       err = phy_scan_fixups(phydev);
        if (err) {
                pr_err("PHY %d failed to initialize\n", phydev->addr);
                goto out;
@@ -575,6 +575,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
                      u32 flags, phy_interface_t interface)
 {
        struct device *d = &phydev->dev;
+       struct module *bus_module;
        int err;
 
        /* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
                return -EBUSY;
        }
 
+       /* Increment the bus module reference count */
+       bus_module = phydev->bus->dev.driver ?
+                    phydev->bus->dev.driver->owner : NULL;
+       if (!try_module_get(bus_module)) {
+               dev_err(&dev->dev, "failed to get the bus module\n");
+               return -EIO;
+       }
+
        phydev->attached_dev = dev;
        dev->phydev = phydev;
 
@@ -664,6 +673,10 @@ EXPORT_SYMBOL(phy_attach);
 void phy_detach(struct phy_device *phydev)
 {
        int i;
+
+       if (phydev->bus->dev.driver)
+               module_put(phydev->bus->dev.driver->owner);
+
        phydev->attached_dev->phydev = NULL;
        phydev->attached_dev = NULL;
        phy_suspend(phydev);
index 9ea4bfe5d31804ed7413ade40ec568a777b9739d..2a32d9167d3b931fb82c434bb0198e27bfc1d8ae 100644 (file)
@@ -341,6 +341,22 @@ next_desc:
                usb_driver_release_interface(driver, info->data);
                return -ENODEV;
        }
+
+       /* Some devices don't initialise properly. In particular
+        * the packet filter is not reset. There are devices that
+        * don't do reset all the way. So the packet filter should
+        * be set to a sane initial value.
+        */
+       usb_control_msg(dev->udev,
+                       usb_sndctrlpipe(dev->udev, 0),
+                       USB_CDC_SET_ETHERNET_PACKET_FILTER,
+                       USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                       USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
+                       intf->cur_altsetting->desc.bInterfaceNumber,
+                       NULL,
+                       0,
+                       USB_CTRL_SET_TIMEOUT
+               );
        return 0;
 
 bad_desc:
index 7bad2d316637ab1ac8f7d83d197facbaf3e4495e..3eab74c7c55406417b65ba5f80f869fc95c606ea 100644 (file)
 /* USB_DEV_STAT */
 #define STAT_SPEED_MASK                0x0006
 #define STAT_SPEED_HIGH                0x0000
-#define STAT_SPEED_FULL                0x0001
+#define STAT_SPEED_FULL                0x0002
 
 /* USB_TX_AGG */
 #define TX_AGG_MAX_THRESHOLD   0x03
@@ -2292,9 +2292,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
        /* rx share fifo credit full threshold */
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
-       ocp_data &= STAT_SPEED_MASK;
-       if (ocp_data == STAT_SPEED_FULL) {
+       if (tp->udev->speed == USB_SPEED_FULL ||
+           tp->udev->speed == USB_SPEED_LOW) {
                /* rx share fifo credit near full threshold */
                ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
                                RXFIFO_THR2_FULL);
index ade33ef82823b230a34890d77af039d8531aefb7..9f79192c9aa0130d978e01ea8c1585298686ea2a 100644 (file)
@@ -339,7 +339,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
        ndm->ndm_state = fdb->state;
        ndm->ndm_ifindex = vxlan->dev->ifindex;
        ndm->ndm_flags = fdb->flags;
-       ndm->ndm_type = NDA_DST;
+       ndm->ndm_type = RTN_UNICAST;
 
        if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
                goto nla_put_failure;
index 66acb2cbd9df3cc45c307bb6b38118436da3d01a..7c28cb55610b377a1a3651b54c901f9c9ec97f97 100644 (file)
@@ -887,6 +887,15 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
 
                tx_info = IEEE80211_SKB_CB(skb);
                tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
+               /*
+                * No aggregation session is running, but there may be frames
+                * from a previous session or a failed attempt in the queue.
+                * Send them out as normal data frames
+                */
+               if (!tid->active)
+                       tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
                if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
                        bf->bf_state.bf_type = 0;
                        return bf;
index 725ba49576bf640a41194cc4539176f70118c9d0..8b79081d4885122d8264bbed3c896150b4e206a9 100644 (file)
@@ -1072,8 +1072,12 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
        /* Fill the common data for all mac context types */
        iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
-       /* Also enable probe requests to pass */
-       cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+       /*
+        * pass probe requests and beacons from other APs (needed
+        * for ht protection)
+        */
+       cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+                                       MAC_FILTER_IN_BEACON);
 
        /* Fill the data specific for ap mode */
        iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
@@ -1094,6 +1098,13 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
        /* Fill the common data for all mac context types */
        iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
+       /*
+        * pass probe requests and beacons from other APs (needed
+        * for ht protection)
+        */
+       cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+                                       MAC_FILTER_IN_BEACON);
+
        /* Fill the data specific for GO mode */
        iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
                                     action == FW_CTXT_ACTION_ADD);
index 9bfb90680cdcb2e6d8d716a04798322c4c9b2174..98556d03c1edabf8e521998d4eec0f4a41a89fdf 100644 (file)
@@ -303,13 +303,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
        }
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
-           !iwlwifi_mod_params.uapsd_disable) {
-               hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
-               hw->uapsd_queues = IWL_UAPSD_AC_INFO;
-               hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
-       }
-
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
        hw->chanctx_data_size = sizeof(u16);
index b777d8f46bd59696e366f957c27211411b32ef76..9aa012e6ea0a6ed988150624bacc0eb2b8066c06 100644 (file)
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #include <asm/page.h>
 
+/*
+ * of_fdt_limit_memory - limit the number of regions in the /memory node
+ * @limit: maximum entries
+ *
+ * Adjust the flattened device tree to have at most 'limit' number of
+ * memory entries in the /memory node. This function may be called
+ * any time after initial_boot_param is set.
+ */
+void of_fdt_limit_memory(int limit)
+{
+       int memory;
+       int len;
+       const void *val;
+       int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
+       int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+       const uint32_t *addr_prop;
+       const uint32_t *size_prop;
+       int root_offset;
+       int cell_size;
+
+       root_offset = fdt_path_offset(initial_boot_params, "/");
+       if (root_offset < 0)
+               return;
+
+       addr_prop = fdt_getprop(initial_boot_params, root_offset,
+                               "#address-cells", NULL);
+       if (addr_prop)
+               nr_address_cells = fdt32_to_cpu(*addr_prop);
+
+       size_prop = fdt_getprop(initial_boot_params, root_offset,
+                               "#size-cells", NULL);
+       if (size_prop)
+               nr_size_cells = fdt32_to_cpu(*size_prop);
+
+       cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
+
+       memory = fdt_path_offset(initial_boot_params, "/memory");
+       if (memory > 0) {
+               val = fdt_getprop(initial_boot_params, memory, "reg", &len);
+               if (len > limit*cell_size) {
+                       len = limit*cell_size;
+                       pr_debug("Limiting number of entries to %d\n", limit);
+                       fdt_setprop(initial_boot_params, memory, "reg", val,
+                                       len);
+               }
+       }
+}
+
 /**
  * of_fdt_is_compatible - Return true if given node from the given blob has
  * compat in its compatible list
@@ -937,7 +985,7 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
 }
 #endif
 
-bool __init early_init_dt_scan(void *params)
+bool __init early_init_dt_verify(void *params)
 {
        if (!params)
                return false;
@@ -951,6 +999,12 @@ bool __init early_init_dt_scan(void *params)
                return false;
        }
 
+       return true;
+}
+
+
+void __init early_init_dt_scan_nodes(void)
+{
        /* Retrieve various information from the /chosen node */
        of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
 
@@ -959,7 +1013,17 @@ bool __init early_init_dt_scan(void *params)
 
        /* Setup memory, calling early_init_dt_add_memory_arch */
        of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+
+bool __init early_init_dt_scan(void *params)
+{
+       bool status;
+
+       status = early_init_dt_verify(params);
+       if (!status)
+               return false;
 
+       early_init_dt_scan_nodes();
        return true;
 }
 
index 2872ece81f358df7ff139c143e86d58031485b14..44333bd8f90886260e2219acad391dadaa9fd095 100644 (file)
@@ -5,6 +5,12 @@
 # Parport configuration.
 #
 
+config ARCH_MIGHT_HAVE_PC_PARPORT
+       bool
+       help
+         Select this config option from the architecture Kconfig if
+         the architecture might have PC parallel port hardware.
+
 menuconfig PARPORT
        tristate "Parallel port support"
        depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
 
          If unsure, say Y.
 
-config ARCH_MIGHT_HAVE_PC_PARPORT
-       bool
-       help
-         Select this config option from the architecture Kconfig if
-         the architecture might have PC parallel port hardware.
-
 if PARPORT
 
 config PARPORT_PC
index b81448b2c75da0c8c5d1803f3af64053f834e040..a5c6cb773e5f7b59078de680ec7aee69bb335dfb 100644 (file)
@@ -319,8 +319,7 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
        struct pnp_dev *pnp = _pnp;
 
        /* true means it matched */
-       return !acpi->physical_node_count
-           && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
+       return pnp->data == acpi;
 }
 
 static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
index 9b60b1f3261c1182e2c799e2affaf7e49605015c..44341dc5b148301b6e185d2bbc4e5d0248b0a3c5 100644 (file)
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
                        "desc %p not ACKed\n", tx_desc);
        }
 
+       if (ret == NULL) {
+               dev_dbg(bdma_chan->dchan.device->dev,
+                       "%s: unable to obtain tx descriptor\n", __func__);
+               goto err_out;
+       }
+
        i = bdma_chan->wr_count_next % bdma_chan->bd_num;
        if (i == bdma_chan->bd_num - 1) {
                i = 0;
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
        tx_desc->txd.phys = bdma_chan->bd_phys +
                                i * sizeof(struct tsi721_dma_desc);
        tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-
+err_out:
        spin_unlock_bh(&bdma_chan->lock);
 
        return ret;
index f7e316368c99ea289deb85a66541a88f06577737..3f50dfcb3227a834069a530bd8628e1cf9ee603d 100644 (file)
@@ -733,6 +733,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        scsi_next_command(cmd);
                        return;
                }
+       } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
+               /*
+                * Certain non BLOCK_PC requests are commands that don't
+                * actually transfer anything (FLUSH), so cannot use
+                * good_bytes != blk_rq_bytes(req) as the signal for an error.
+                * This sets the error explicitly for the problem case.
+                */
+               error = __scsi_error_from_host_byte(cmd, result);
        }
 
        /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
index 8b25c1aa2025c785f5a57fae0a15923629ef96ea..ebb19b22f47ff5889bb8c218d20dc50ddd09e48a 100644 (file)
@@ -530,8 +530,10 @@ int rtw_resume_process23a(struct rtw_adapter *padapter)
        pwrpriv->bkeepfwalive = false;
 
        DBG_8723A("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
-       if (pm_netdev_open23a(pnetdev, true) != 0)
+       if (pm_netdev_open23a(pnetdev, true) != 0) {
+               up(&pwrpriv->lock);
                goto exit;
+       }
 
        netif_device_attach(pnetdev);
        netif_carrier_on(pnetdev);
index 59679cd46816c6b725298944ec9b73eabf593552..69b80e80b0110b3cf0ea4ec793d8311620f2cd5e 100644 (file)
@@ -981,7 +981,7 @@ start:
                pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
        }
 
-       {
+       if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
                pDevice->byReAssocCount++;
                /* 10 sec timeout */
                if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
index 1d3908d044d09e03f5b6dae1c19ecfef45f70bde..5a5fd937a4426ea3fcdaf1029c4e48346281b5a9 100644 (file)
@@ -2318,6 +2318,7 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
        int             handled = 0;
        unsigned char byData = 0;
        int             ii = 0;
+       unsigned long flags;
 
        MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
 
@@ -2331,7 +2332,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
 
        handled = 1;
        MACvIntDisable(pDevice->PortOffset);
-       spin_lock_irq(&pDevice->lock);
+
+       spin_lock_irqsave(&pDevice->lock, flags);
 
        //Make sure current page is 0
        VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
@@ -2560,7 +2562,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
        if (byOrgPageSel == 1)
                MACvSelectPage1(pDevice->PortOffset);
 
-       spin_unlock_irq(&pDevice->lock);
+       spin_unlock_irqrestore(&pDevice->lock, flags);
+
        MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
 
        return IRQ_RETVAL(handled);
index 5d4de88fe5b8aaf9792c37188931f8edeed1fb0f..eeba7544f0cd4a7dc54119b9a2df2f7c13a975db 100644 (file)
@@ -1195,18 +1195,20 @@ static int gnttab_expand(unsigned int req_entries)
 int gnttab_init(void)
 {
        int i;
+       unsigned long max_nr_grant_frames;
        unsigned int max_nr_glist_frames, nr_glist_frames;
        unsigned int nr_init_grefs;
        int ret;
 
        gnttab_request_version();
+       max_nr_grant_frames = gnttab_max_grant_frames();
        nr_grant_frames = 1;
 
        /* Determine the maximum number of frames required for the
         * grant reference free list on the current hypervisor.
         */
        BUG_ON(grefs_per_grant_frame == 0);
-       max_nr_glist_frames = (gnttab_max_grant_frames() *
+       max_nr_glist_frames = (max_nr_grant_frames *
                               grefs_per_grant_frame / RPP);
 
        gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1223,6 +1225,11 @@ int gnttab_init(void)
                }
        }
 
+       ret = arch_gnttab_init(max_nr_grant_frames,
+                              nr_status_frames(max_nr_grant_frames));
+       if (ret < 0)
+               goto ini_nomem;
+
        if (gnttab_setup() < 0) {
                ret = -ENODEV;
                goto ini_nomem;
index 42dd2e499ed8902c08bddbba1ff1723363f833dd..35de0c04729fdbe357b10ddab7f870f3b560d8cc 100644 (file)
@@ -55,13 +55,13 @@ static int __init afs_get_client_UUID(void)
        afs_uuid.time_low = uuidtime;
        afs_uuid.time_mid = uuidtime >> 32;
        afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
-       afs_uuid.time_hi_and_version = AFS_UUID_VERSION_TIME;
+       afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
 
        get_random_bytes(&clockseq, 2);
        afs_uuid.clock_seq_low = clockseq;
        afs_uuid.clock_seq_hi_and_reserved =
                (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
-       afs_uuid.clock_seq_hi_and_reserved = AFS_UUID_VARIANT_STD;
+       afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
 
        _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
               afs_uuid.time_low,
index 98040ba388ac1e2db62f96f253bc141758013b10..17e39b047de5b8a64349c86a5f9432efcb19f2e5 100644 (file)
@@ -71,7 +71,6 @@ struct dio_submit {
                                           been performed at the start of a
                                           write */
        int pages_in_io;                /* approximate total IO pages */
-       size_t  size;                   /* total request size (doesn't change)*/
        sector_t block_in_file;         /* Current offset into the underlying
                                           file in dio_block units. */
        unsigned blocks_available;      /* At block_in_file.  changes */
@@ -198,9 +197,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
  * L1 cache.
  */
 static inline struct page *dio_get_page(struct dio *dio,
-               struct dio_submit *sdio, size_t *from, size_t *to)
+                                       struct dio_submit *sdio)
 {
-       int n;
        if (dio_pages_present(sdio) == 0) {
                int ret;
 
@@ -209,10 +207,7 @@ static inline struct page *dio_get_page(struct dio *dio,
                        return ERR_PTR(ret);
                BUG_ON(dio_pages_present(sdio) == 0);
        }
-       n = sdio->head++;
-       *from = n ? 0 : sdio->from;
-       *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
-       return dio->pages[n];
+       return dio->pages[sdio->head];
 }
 
 /**
@@ -911,11 +906,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
        while (sdio->block_in_file < sdio->final_block_in_request) {
                struct page *page;
                size_t from, to;
-               page = dio_get_page(dio, sdio, &from, &to);
+
+               page = dio_get_page(dio, sdio);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
+               from = sdio->head ? 0 : sdio->from;
+               to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
+               sdio->head++;
 
                while (from < to) {
                        unsigned this_chunk_bytes;      /* # of bytes mapped */
@@ -1104,7 +1103,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        unsigned blkbits = i_blkbits;
        unsigned blocksize_mask = (1 << blkbits) - 1;
        ssize_t retval = -EINVAL;
-       loff_t end = offset + iov_iter_count(iter);
+       size_t count = iov_iter_count(iter);
+       loff_t end = offset + count;
        struct dio *dio;
        struct dio_submit sdio = { 0, };
        struct buffer_head map_bh = { 0, };
@@ -1287,10 +1287,9 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         */
        BUG_ON(retval == -EIOCBQUEUED);
        if (dio->is_async && retval == 0 && dio->result &&
-           ((rw == READ) || (dio->result == sdio.size)))
+           (rw == READ || dio->result == count))
                retval = -EIOCBQUEUED;
-
-       if (retval != -EIOCBQUEUED)
+       else
                dio_await_completion(dio);
 
        if (drop_refcount(dio) == 0) {
index 8474028d7848064912b34df229b893c39f6144e9..03246cd9d47a7b27d93ce0c2a122cae89d5fbd4f 100644 (file)
@@ -907,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->writeback_cache = 1;
                        if (arg->time_gran && arg->time_gran <= 1000000000)
                                fc->sb->s_time_gran = arg->time_gran;
-                       else
-                               fc->sb->s_time_gran = 1000000000;
-
                } else {
                        ra_pages = fc->max_read / PAGE_CACHE_SIZE;
                        fc->no_lock = 1;
@@ -938,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
                FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
                FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
                FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
-               FUSE_WRITEBACK_CACHE;
+               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
index 985c6f3684859e17439d62c7d319611305414437..9eb787e5c167fb601845590f0181d249bf515fb0 100644 (file)
@@ -2256,9 +2256,10 @@ done:
                goto out;
        }
        path->dentry = dentry;
-       path->mnt = mntget(nd->path.mnt);
+       path->mnt = nd->path.mnt;
        if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
                return 1;
+       mntget(path->mnt);
        follow_mount(path);
        error = 0;
 out:
index 36662d0362379698fcb5764fbb142337732b3919..d6fd3acde134d9669391cd064960790dea586042 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -263,11 +263,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                return -EPERM;
 
        /*
-        * We can not allow to do any fallocate operation on an active
-        * swapfile
+        * We cannot allow any fallocate operation on an active swapfile
         */
        if (IS_SWAPFILE(inode))
-               ret = -ETXTBSY;
+               return -ETXTBSY;
 
        /*
         * Revalidate the write permissions, in case security policy has
index 002a2855c046c7a59e77b914120367728b310e1d..3d33794e4f3edd75950e58cb7d5c49f69e33795b 100644 (file)
@@ -30,7 +30,8 @@
 #define MUX_MODE14     0xe
 #define MUX_MODE15     0xf
 
-#define PULL_ENA               (1 << 16)
+#define PULL_ENA               (0 << 16)
+#define PULL_DIS               (1 << 16)
 #define PULL_UP                        (1 << 17)
 #define INPUT_EN               (1 << 18)
 #define SLEWCONTROL            (1 << 19)
 #define WAKEUP_EVENT           (1 << 25)
 
 /* Active pin states */
-#define PIN_OUTPUT             0
+#define PIN_OUTPUT             (0 | PULL_DIS)
 #define PIN_OUTPUT_PULLUP      (PIN_OUTPUT | PULL_ENA | PULL_UP)
 #define PIN_OUTPUT_PULLDOWN    (PIN_OUTPUT | PULL_ENA)
-#define PIN_INPUT              INPUT_EN
+#define PIN_INPUT              (INPUT_EN | PULL_DIS)
 #define PIN_INPUT_SLEW         (INPUT_EN | SLEWCONTROL)
 #define PIN_INPUT_PULLUP       (PULL_ENA | INPUT_EN | PULL_UP)
 #define PIN_INPUT_PULLDOWN     (PULL_ENA | INPUT_EN)
index 255cd5cc07541034fd0f80e1d944ca775b6c78df..a23c096b30807c20db8d967dbb130f4842fa44e2 100644 (file)
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
+void free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
index 05117899fcb4f43d7c539ec4ce41500e61b595b6..0ff360d5b3b3078b1ca85d29696d02741473edd0 100644 (file)
@@ -73,6 +73,8 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname,
                                   int depth, void *data);
 
 extern bool early_init_dt_scan(void *params);
+extern bool early_init_dt_verify(void *params);
+extern void early_init_dt_scan_nodes(void);
 
 extern const char *of_flat_dt_get_machine_name(void);
 extern const void *of_flat_dt_match_machine(const void *default_match,
@@ -84,6 +86,7 @@ extern void unflatten_and_copy_device_tree(void);
 extern void early_init_devtree(void *);
 extern void early_get_first_memblock_info(void *, phys_addr_t *);
 extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern void of_fdt_limit_memory(int limit);
 #else /* CONFIG_OF_FLATTREE */
 static inline void early_init_fdt_scan_reserved_mem(void) {}
 static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
index 0e795df05ec983d07d7acc52cddabd728ea6e0b1..7596eb22e1ce3c8585eeca8fc30cd2bcae502e26 100644 (file)
@@ -309,16 +309,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
        }
 }
 
-#define IP_IDENTS_SZ 2048u
-extern atomic_t *ip_idents;
-
-static inline u32 ip_idents_reserve(u32 hash, int segs)
-{
-       atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
-
-       return atomic_add_return(segs, id_ptr) - segs;
-}
-
+u32 ip_idents_reserve(u32 hash, int segs);
 void __ip_select_ident(struct iphdr *iph, int segs);
 
 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
index 40b5ca8a1b1f3028e5e03c5b3372f98e39437422..25084a052a1eff964d19a9683d5d6470590e4c7e 100644 (file)
  *  - add FATTR_CTIME
  *  - add ctime and ctimensec to fuse_setattr_in
  *  - add FUSE_RENAME2 request
+ *  - add FUSE_NO_OPEN_SUPPORT flag
  */
 
 #ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
  * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
  * FUSE_ASYNC_DIO: asynchronous direct I/O submission
  * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
+ * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
  */
 #define FUSE_ASYNC_READ                (1 << 0)
 #define FUSE_POSIX_LOCKS       (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
 #define FUSE_READDIRPLUS_AUTO  (1 << 14)
 #define FUSE_ASYNC_DIO         (1 << 15)
 #define FUSE_WRITEBACK_CACHE   (1 << 16)
+#define FUSE_NO_OPEN_SUPPORT   (1 << 17)
 
 /**
  * CUSE INIT request/reply flags
index a5af2a26d94f3f698072b3bf70a444bdb5da7978..5c1aba154b64c833c9acb865bcc600057234b59a 100644 (file)
@@ -170,6 +170,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
        unmap->dev_bus_addr = 0;
 }
 
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
 int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
                           void **__shared);
index b0c95f0f06fd37ee40ac508427441d27ef3f824f..6b17ac1b0c2a33c479a352fde633f47731807950 100644 (file)
@@ -7458,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       perf_remove_from_context(child_event, true);
+       /*
+        * Do not destroy the 'original' grouping; because of the context
+        * switch optimization the original events could've ended up in a
+        * random child task.
+        *
+        * If we were to destroy the original group, all group related
+        * operations would cease to function properly after this random
+        * child dies.
+        *
+        * Do destroy all inherited groups, we don't care about those
+        * and being thorough is better.
+        */
+       perf_remove_from_context(child_event, !!child_event->parent);
 
        /*
         * It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
        struct perf_event *child_event, *next;
-       struct perf_event_context *child_ctx;
+       struct perf_event_context *child_ctx, *parent_ctx;
        unsigned long flags;
 
        if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
        raw_spin_lock(&child_ctx->lock);
        task_ctx_sched_out(child_ctx);
        child->perf_event_ctxp[ctxn] = NULL;
+
+       /*
+        * In order to avoid freeing: child_ctx->parent_ctx->task
+        * under perf_event_context::lock, grab another reference.
+        */
+       parent_ctx = child_ctx->parent_ctx;
+       if (parent_ctx)
+               get_ctx(parent_ctx);
+
        /*
         * If this context is a clone; unclone it so it can't get
         * swapped to another process while we're removing all
@@ -7508,6 +7529,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
        update_context_time(child_ctx);
        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
+       /*
+        * Now that we no longer hold perf_event_context::lock, drop
+        * our extra child_ctx->parent_ctx reference.
+        */
+       if (parent_ctx)
+               put_ctx(parent_ctx);
+
        /*
         * Report the task dead after unscheduling the events so that we
         * won't get any samples after PERF_RECORD_EXIT. We can however still
index 369f41a9412481029354034d4c6e0193fe132f56..4b8f0c9258843246e9233b0a01f60455725feba9 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/swap.h>
 #include <linux/syscore_ops.h>
 #include <linux/compiler.h>
+#include <linux/hugetlb.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -1619,6 +1620,9 @@ static int __init crash_save_vmcoreinfo_init(void)
 #endif
        VMCOREINFO_NUMBER(PG_head_mask);
        VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
+#ifdef CONFIG_HUGETLBFS
+       VMCOREINFO_SYMBOL(free_huge_page);
+#endif
 
        arch_crash_save_vmcoreinfo();
        update_vmcoreinfo_note();
index 3214289df5a7a8f6917718a9a00f418794efeab1..734e9a7d280bd22a046566cc40b1edf159f250bf 100644 (file)
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
 {
        unsigned long *iter;
        struct kprobe_blacklist_entry *ent;
-       unsigned long offset = 0, size = 0;
+       unsigned long entry, offset = 0, size = 0;
 
        for (iter = start; iter < end; iter++) {
-               if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
-                       pr_err("Failed to find blacklist %p\n", (void *)*iter);
+               entry = arch_deref_entry_point((void *)*iter);
+
+               if (!kernel_text_address(entry) ||
+                   !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+                       pr_err("Failed to find blacklist at %p\n",
+                               (void *)entry);
                        continue;
                }
 
                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
                        return -ENOMEM;
-               ent->start_addr = *iter;
-               ent->end_addr = *iter + size;
+               ent->start_addr = entry;
+               ent->end_addr = entry + size;
                INIT_LIST_HEAD(&ent->list);
                list_add_tail(&ent->list, &kprobe_blacklist);
        }
index 7fa34f86e5bab4a0c99df15233e799b9aa8663de..948a7693748ed3d0be04fb022d5ac6a195903e3c 100644 (file)
@@ -18,7 +18,7 @@
  * Copyright (C) IBM Corporation, 2005, 2006
  *
  * Authors: Paul E. McKenney <paulmck@us.ibm.com>
- *       Josh Triplett <josh@freedesktop.org>
+ *       Josh Triplett <josh@joshtriplett.org>
  *
  * See also:  Documentation/RCU/torture.txt
  */
@@ -51,7 +51,7 @@
 #include <linux/torture.h>
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
 
 torture_param(int, fqs_duration, 0,
index dafb06f70a09dd97b1fa690969a638f596714091..900edfaf6df5735a1979c866cd288c46c81e50ca 100644 (file)
@@ -1031,18 +1031,21 @@ EXPORT_SYMBOL(find_lock_entry);
  * @mapping: the address_space to search
  * @offset: the page index
  * @fgp_flags: PCG flags
- * @gfp_mask: gfp mask to use if a page is to be allocated
+ * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
+ * @radix_gfp_mask: gfp mask to use for radix tree node allocation
  *
  * Looks up the page cache slot at @mapping & @offset.
  *
- * PCG flags modify how the page is returned
+ * PCG flags modify how the page is returned.
  *
  * FGP_ACCESSED: the page will be marked accessed
  * FGP_LOCK: Page is return locked
  * FGP_CREAT: If page is not present then a new page is allocated using
- *             @gfp_mask and added to the page cache and the VM's LRU
- *             list. The page is returned locked and with an increased
- *             refcount. Otherwise, %NULL is returned.
+ *             @cache_gfp_mask and added to the page cache and the VM's LRU
+ *             list. If radix tree nodes are allocated during page cache
+ *             insertion then @radix_gfp_mask is used. The page is returned
+ *             locked and with an increased refcount. Otherwise, %NULL is
+ *             returned.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
index 9221c02ed9e2b8ab495e0b39482e2bcfa5ba9957..7a0a73d2fcff128850b32af9910a873d6fb384f5 100644 (file)
@@ -856,7 +856,7 @@ struct hstate *size_to_hstate(unsigned long size)
        return NULL;
 }
 
-static void free_huge_page(struct page *page)
+void free_huge_page(struct page *page)
 {
        /*
         * Can't pass hstate in here because it is called from the
index a2c7bcb0e6ebf10672f91b8117aef69d08b41ade..1f14a430c65691b5135c0752a5c9c151cbd2f0f9 100644 (file)
@@ -5415,8 +5415,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
 {
        struct mem_cgroup_eventfd_list *ev;
 
+       spin_lock(&memcg_oom_lock);
+
        list_for_each_entry(ev, &memcg->oom_notify, list)
                eventfd_signal(ev->eventfd, 1);
+
+       spin_unlock(&memcg_oom_lock);
        return 0;
 }
 
index 7211a73ba14d16155c1514c078b1e3c9d25e0b22..a013bc94ebbed4af3764e396b087475310ed5185 100644 (file)
@@ -895,7 +895,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        struct page *hpage = *hpagep;
        struct page *ppage;
 
-       if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
+       /*
+        * Here we are interested only in user-mapped pages, so skip any
+        * other types of pages.
+        */
+       if (PageReserved(p) || PageSlab(p))
+               return SWAP_SUCCESS;
+       if (!(PageLRU(hpage) || PageHuge(p)))
                return SWAP_SUCCESS;
 
        /*
@@ -905,8 +911,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        if (!page_mapped(hpage))
                return SWAP_SUCCESS;
 
-       if (PageKsm(p))
+       if (PageKsm(p)) {
+               pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
                return SWAP_FAIL;
+       }
 
        if (PageSwapCache(p)) {
                printk(KERN_ERR
@@ -1229,7 +1237,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
            != SWAP_SUCCESS) {
-               printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+               action_result(pfn, "unmapping failed", IGNORED);
                res = -EBUSY;
                goto out;
        }
index 7e8d8205b6108fcf5b0a97aad3da36c9455b230f..8b44f765b64584a9a2e7c6f8873d6fcb6acb7726 100644 (file)
@@ -2758,23 +2758,18 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
        update_mmu_cache(vma, address, pte);
 }
 
-static unsigned long fault_around_bytes = 65536;
+static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
 
-/*
- * fault_around_pages() and fault_around_mask() round down fault_around_bytes
- * to nearest page order. It's what do_fault_around() expects to see.
- */
 static inline unsigned long fault_around_pages(void)
 {
-       return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
+       return fault_around_bytes >> PAGE_SHIFT;
 }
 
 static inline unsigned long fault_around_mask(void)
 {
-       return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
+       return ~(fault_around_bytes - 1) & PAGE_MASK;
 }
 
-
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
 {
@@ -2782,11 +2777,19 @@ static int fault_around_bytes_get(void *data, u64 *val)
        return 0;
 }
 
+/*
+ * fault_around_pages() and fault_around_mask() expects fault_around_bytes
+ * rounded down to nearest page order. It's what do_fault_around() expects to
+ * see.
+ */
 static int fault_around_bytes_set(void *data, u64 val)
 {
        if (val / PAGE_SIZE > PTRS_PER_PTE)
                return -EINVAL;
-       fault_around_bytes = val;
+       if (val > PAGE_SIZE)
+               fault_around_bytes = rounddown_pow_of_two(val);
+       else
+               fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
        return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
index 9e0beaa918454abbcd63e94ee6cefb5f108f751f..be6dbf995c0cea7128fa58124057d8891cfa7933 100644 (file)
@@ -988,9 +988,10 @@ out:
         * it.  Otherwise, putback_lru_page() will drop the reference grabbed
         * during isolation.
         */
-       if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+       if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+               ClearPageSwapBacked(newpage);
                put_new_page(newpage, private);
-       else
+       } else
                putback_lru_page(newpage);
 
        if (result) {
index 518e2c3f4c75f0014a03817b4910cf4050a2f480..e0c943014eb74ce3d4cb4c021d6f896740386d6d 100644 (file)
@@ -1306,9 +1306,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
        *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
        if (bdi_bg_thresh)
-               *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
-                                        background_thresh,
-                                        dirty_thresh);
+               *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
+                                                       background_thresh,
+                                                       dirty_thresh) : 0;
 
        /*
         * In order to avoid the stacked BDI deadlock we need
index 0ea758b898fdae74dbe5bf414b8995c4c944e7b3..ef44ad736ca17f79606021439b89a9fba4455564 100644 (file)
@@ -2447,7 +2447,7 @@ static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
-       const gfp_t wait = gfp_mask & __GFP_WAIT;
+       const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
 
        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2456,20 +2456,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
         * The caller may dip into page reserves a bit more if the caller
         * cannot run direct reclaim, or if the caller has realtime scheduling
         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
-        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+        * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
         */
        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
-       if (!wait) {
+       if (atomic) {
                /*
-                * Not worth trying to allocate harder for
-                * __GFP_NOMEMALLOC even if it can't schedule.
+                * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
+                * if it can't schedule.
                 */
-               if  (!(gfp_mask & __GFP_NOMEMALLOC))
+               if (!(gfp_mask & __GFP_NOMEMALLOC))
                        alloc_flags |= ALLOC_HARDER;
                /*
-                * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-                * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+                * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
+                * comment for __cpuset_node_allowed_softwall().
                 */
                alloc_flags &= ~ALLOC_CPUSET;
        } else if (unlikely(rt_task(current)) && !in_interrupt())
@@ -6062,11 +6062,13 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
 }
 
 /**
- * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
+ * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest to retrieve
- * @end_bitidx: The last bit of interest
- * returns pageblock_bits flags
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest to retrieve
+ * @mask: mask of bits that the caller is interested in
+ *
+ * Return: pageblock_bits flags
  */
 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
                                        unsigned long end_bitidx,
@@ -6091,9 +6093,10 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 /**
  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest
- * @end_bitidx: The last bit of interest
  * @flags: The flags to set
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest
+ * @mask: mask of bits that the caller is interested in
  */
 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
                                        unsigned long pfn,
index 9a76eaf63184753d6020eb1f3a9ab51299b1ae02..bc8aeefddf3f23cb259b15c319e05949b9f78901 100644 (file)
@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
 {
        int tot_len;
 
-       if (kern_msg->msg_namelen) {
+       if (kern_msg->msg_name && kern_msg->msg_namelen) {
                if (mode == VERIFY_READ) {
                        int err = move_addr_to_kernel(kern_msg->msg_name,
                                                      kern_msg->msg_namelen,
@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
                        if (err < 0)
                                return err;
                }
-               if (kern_msg->msg_name)
-                       kern_msg->msg_name = kern_address;
-       } else
+               kern_msg->msg_name = kern_address;
+       } else {
                kern_msg->msg_name = NULL;
+               kern_msg->msg_namelen = 0;
+       }
 
        tot_len = iov_from_user_compat_to_kern(kern_iov,
                                          (struct compat_iovec __user *)kern_msg->msg_iov,
index 827dd6beb49c4c70adc2960f1b3046eb48a03574..e1ec45ab1e63c6909073691907c80e57bac89643 100644 (file)
@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 {
        int size, ct, err;
 
-       if (m->msg_namelen) {
+       if (m->msg_name && m->msg_namelen) {
                if (mode == VERIFY_READ) {
                        void __user *namep;
                        namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
                        if (err < 0)
                                return err;
                }
-               if (m->msg_name)
-                       m->msg_name = address;
+               m->msg_name = address;
        } else {
                m->msg_name = NULL;
+               m->msg_namelen = 0;
        }
 
        size = m->msg_iovlen * sizeof(struct iovec);
index 559890b0f0a2c6aaac5dc16878e2a4ff16a55933..ef31fef25e5a872eb6565c1d57953d0c6558d2c7 100644 (file)
@@ -2249,7 +2249,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = pn->flags | NTF_PROXY;
-       ndm->ndm_type    = NDA_DST;
+       ndm->ndm_type    = RTN_UNICAST;
        ndm->ndm_ifindex = pn->dev->ifindex;
        ndm->ndm_state   = NUD_NONE;
 
index 3162ea923dedba7021cd67d24d721f94bf2342d0..190199851c9abbd0017f8e6c7134283505f6ceff 100644 (file)
@@ -457,8 +457,31 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
        return neigh_create(&arp_tbl, pkey, dev);
 }
 
-atomic_t *ip_idents __read_mostly;
-EXPORT_SYMBOL(ip_idents);
+#define IP_IDENTS_SZ 2048u
+struct ip_ident_bucket {
+       atomic_t        id;
+       u32             stamp32;
+};
+
+static struct ip_ident_bucket *ip_idents __read_mostly;
+
+/* In order to protect privacy, we add a perturbation to identifiers
+ * if one generator is seldom used. This makes hard for an attacker
+ * to infer how many packets were sent between two points in time.
+ */
+u32 ip_idents_reserve(u32 hash, int segs)
+{
+       struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
+       u32 old = ACCESS_ONCE(bucket->stamp32);
+       u32 now = (u32)jiffies;
+       u32 delta = 0;
+
+       if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+               delta = prandom_u32_max(now - old);
+
+       return atomic_add_return(segs + delta, &bucket->id) - segs;
+}
+EXPORT_SYMBOL(ip_idents_reserve);
 
 void __ip_select_ident(struct iphdr *iph, int segs)
 {
@@ -467,7 +490,10 @@ void __ip_select_ident(struct iphdr *iph, int segs)
 
        net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
 
-       hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+       hash = jhash_3words((__force u32)iph->daddr,
+                           (__force u32)iph->saddr,
+                           iph->protocol,
+                           ip_idents_hashrnd);
        id = ip_idents_reserve(hash, segs);
        iph->id = htons(id);
 }
index cb9df0eb40237065e696dd1013180b6c82a9c2dc..45702b8cd141d2c850741184c2efc53d3a510c73 100644 (file)
@@ -545,6 +545,8 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
        net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
 
        hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
+       hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
+
        id = ip_idents_reserve(hash, 1);
        fhdr->identification = htonl(id);
 }
index d7513a503be11b180031342dcf316450fd6c69d3..592f4b152ba8a51e4d1c3b3bc3360286959ff1e0 100644 (file)
@@ -472,12 +472,15 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
-       struct rate_control_ref *ref = local->rate_ctrl;
+       struct rate_control_ref *ref = NULL;
        struct timespec uptime;
        u64 packets = 0;
        u32 thr = 0;
        int i, ac;
 
+       if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
+               ref = local->rate_ctrl;
+
        sinfo->generation = sdata->local->sta_generation;
 
        sinfo->filled = STATION_INFO_INACTIVE_TIME |
index 5214686d9fd1ec9ab4bc1e2a466532bd3c829c10..1a252c606ad014d8d2e7558e57e0335f9f055ad7 100644 (file)
@@ -414,6 +414,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
        if (ieee80211_has_order(hdr->frame_control))
                return TX_CONTINUE;
 
+       if (ieee80211_is_probe_req(hdr->frame_control))
+               return TX_CONTINUE;
+
        if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
                info->hw_queue = tx->sdata->vif.cab_queue;
 
@@ -463,6 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 {
        struct sta_info *sta = tx->sta;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
        struct ieee80211_local *local = tx->local;
 
        if (unlikely(!sta))
@@ -473,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
                     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
                int ac = skb_get_queue_mapping(tx->skb);
 
+               if (ieee80211_is_mgmt(hdr->frame_control) &&
+                   !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+                       return TX_CONTINUE;
+               }
+
                ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
                       sta->sta.addr, sta->sta.aid, ac);
                if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -531,19 +541,9 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-
        if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
                return TX_CONTINUE;
 
-       if (ieee80211_is_mgmt(hdr->frame_control) &&
-           !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
-               if (tx->flags & IEEE80211_TX_UNICAST)
-                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-               return TX_CONTINUE;
-       }
-
        if (tx->flags & IEEE80211_TX_UNICAST)
                return ieee80211_tx_h_unicast_ps_buf(tx);
        else
index a8eb0a89326ab504ec83421ea7b66aba3ab3a057..610e19c0e13fc82b15eb3fa1dd8328df29d9baa2 100644 (file)
@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
                        ip_vs_control_del(cp);
 
                if (cp->flags & IP_VS_CONN_F_NFCT) {
-                       ip_vs_conn_drop_conntrack(cp);
                        /* Do not access conntracks during subsys cleanup
                         * because nf_conntrack_find_get can not be used after
                         * conntrack cleanup for the net.
index 9de23a222d3f7b9f33e650483d749d3341c51dc0..06a9ee6b2d3a577a73b62028a711f5f801fe13f0 100644 (file)
@@ -1097,6 +1097,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
        asoc->c = new->c;
        asoc->peer.rwnd = new->peer.rwnd;
        asoc->peer.sack_needed = new->peer.sack_needed;
+       asoc->peer.auth_capable = new->peer.auth_capable;
        asoc->peer.i = new->peer.i;
        sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
                         asoc->peer.i.initial_tsn, GFP_ATOMIC);
index 560ed77084e92b52cae0f299ca383eef240a42e6..7cc887f9da11d74e3dd5ac5592ccb7f65b900771 100644 (file)
@@ -2094,7 +2094,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
                MAC_ASSIGN(addr, addr);
                __entry->key_type = key_type;
                __entry->key_id = key_id;
-               memcpy(__entry->tsc, tsc, 6);
+               if (tsc)
+                       memcpy(__entry->tsc, tsc, 6);
        ),
        TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
                  NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
index a8ef5108e0d86cbc5c411f3db378fde5a0d54f18..0525d78ba32866c64c0b31bcf5b9d147855c308a 100644 (file)
@@ -2097,6 +2097,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                                goto no_transform;
                        }
 
+                       dst_hold(&xdst->u.dst);
+                       xdst->u.dst.flags |= DST_NOCACHE;
                        route = xdst->route;
                }
        }
index 412d9dc3a8731ec580d4b6596fec491fb10a2a48..d4db6ebb089d0dc4ba2abefd44df0123fa3148dd 100644 (file)
@@ -177,9 +177,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                    attrs[XFRMA_ALG_AEAD]       ||
                    attrs[XFRMA_ALG_CRYPT]      ||
                    attrs[XFRMA_ALG_COMP]       ||
-                   attrs[XFRMA_TFCPAD]         ||
-                   (ntohl(p->id.spi) >= 0x10000))
-
+                   attrs[XFRMA_TFCPAD])
                        goto out;
                break;
 
@@ -207,7 +205,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                    attrs[XFRMA_ALG_AUTH]       ||
                    attrs[XFRMA_ALG_AUTH_TRUNC] ||
                    attrs[XFRMA_ALG_CRYPT]      ||
-                   attrs[XFRMA_TFCPAD])
+                   attrs[XFRMA_TFCPAD]         ||
+                   (ntohl(p->id.spi) >= 0x10000))
                        goto out;
                break;
 
index 6af50eb80ea7517680dbb7b18981eb9906fde5ae..70faa3a325264a68511a2edca0efa225ebe40942 100644 (file)
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
        struct special_params *params = bebob->maudio_special_quirk;
        int err, id;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
        if (id >= ARRAY_SIZE(special_clk_labels))
-               return 0;
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob, id,
                                         params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
                                         params->clk_lock);
        mutex_unlock(&bebob->mutex);
 
-       return err >= 0;
+       if (err >= 0)
+               err = 1;
+
+       return err;
 }
 static struct snd_kcontrol_new special_clk_ctl = {
        .name   = "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
        .get    = special_sync_ctl_get,
 };
 
-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
        "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
 };
 static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item]);
+              special_dig_in_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id, dig_in_fmt, dig_in_iface;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+               return -EINVAL;
 
        /* decode user value */
        dig_in_fmt = (id >> 1) & 0x01;
        dig_in_iface = id & 0x01;
 
+       mutex_lock(&bebob->mutex);
+
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         dig_in_fmt,
                                         params->dig_out_fmt,
                                         params->clk_lock);
-       if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+       if (err < 0)
+               goto end;
+
+       /* For ADAT, optical interface is only available. */
+       if (params->dig_in_fmt > 0) {
+               err = 1;
                goto end;
+       }
 
+       /* For S/PDIF, optical/coaxial interfaces are selectable. */
        err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
        if (err < 0)
                dev_err(&bebob->unit->device,
                        "fail to set digital input interface: %d\n", err);
+       err = 1;
 end:
        special_stream_formation_set(bebob);
        mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
        .put    = special_dig_in_iface_ctl_set
 };
 
+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+       "S/PDIF Optical and Coaxial", "ADAT Optical"
+};
 static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
                                          struct snd_ctl_elem_info *einf)
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item + 1]);
+              special_dig_out_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         params->dig_in_fmt,
                                         id, params->clk_lock);
-       if (err >= 0)
+       if (err >= 0) {
                special_stream_formation_set(bebob);
+               err = 1;
+       }
 
        mutex_unlock(&bebob->mutex);
        return err;
index 56ff9bebb577df935200aacfc1e8251ae0800bcc..476d3bf540a85e2fa1bb68b19d98fbb77b3da6d0 100644 (file)
@@ -1526,17 +1526,33 @@ int kvm_vgic_hyp_init(void)
                goto out_unmap;
        }
 
-       kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-                vctrl_res.start, vgic_maint_irq);
-       on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
        if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
                kvm_err("Cannot obtain VCPU resource\n");
                ret = -ENXIO;
                goto out_unmap;
        }
+
+       if (!PAGE_ALIGNED(vcpu_res.start)) {
+               kvm_err("GICV physical address 0x%llx not page aligned\n",
+                       (unsigned long long)vcpu_res.start);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
+       if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+               kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+                       (unsigned long long)resource_size(&vcpu_res),
+                       PAGE_SIZE);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
        vgic_vcpu_base = vcpu_res.start;
 
+       kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+                vctrl_res.start, vgic_maint_irq);
+       on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
        goto out;
 
 out_unmap: