Merge tag 'sound-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Nov 2013 19:23:22 +0000 (12:23 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Nov 2013 19:23:22 +0000 (12:23 -0700)
Pull more sound fixes from Takashi Iwai:
 "The fixes for random bugs that have been reported lately in the game:
  a few fixes in ASoC dapm and wm_hubs bugs spotted by Coverity, a
  one-liner HD-audio fixup, and a fix for Oops with DPCM.

  They are not so critically urgent bugs, but all small and safe"

* tag 'sound-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound:
  ALSA: fix oops in snd_pcm_info() caused by ASoC DPCM
  ASoC: wm_hubs: Add missing break in hp_supply_event()
  ALSA: hda - Add a fixup for ASUS N76VZ
  ASoC: dapm: Return -ENOMEM in snd_soc_dapm_new_dai_widgets()
  ASoC: dapm: Fix source list debugfs outputs

74 files changed:
arch/um/kernel/exitcode.c
arch/x86/include/asm/percpu.h
arch/x86/kernel/kvm.c
arch/xtensa/kernel/entry.S
arch/xtensa/kernel/signal.c
arch/xtensa/platforms/iss/network.c
drivers/clk/clk-nomadik.c
drivers/clk/mvebu/armada-370.c
drivers/clk/socfpga/clk.c
drivers/clk/versatile/clk-icst.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/radeon.h
drivers/input/input.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/misc/cm109.c
drivers/input/mouse/alps.c
drivers/input/serio/i8042.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/scsi/aacraid/linit.c
drivers/scsi/sg.c
drivers/staging/bcm/Bcmchar.c
drivers/staging/ozwpan/ozcdev.c
drivers/staging/sb105x/sb_pci_mp.c
drivers/staging/wlags49_h2/wl_priv.c
drivers/tty/serial/atmel_serial.c
drivers/uio/uio.c
drivers/video/au1100fb.c
drivers/video/au1200fb.c
fs/dcache.c
fs/eventpoll.c
fs/select.c
include/linux/percpu.h
lib/Kconfig.debug
lib/scatterlist.c
mm/huge_memory.c
mm/list_lru.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/mprotect.c
mm/pagewalk.c
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-top.txt
tools/perf/builtin-kvm.c
tools/perf/builtin-record.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/tests/code-reading.c
tools/perf/tests/keep-tracking.c
tools/perf/tests/mmap-basic.c
tools/perf/tests/open-syscall-tp-fields.c
tools/perf/tests/perf-record.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/tests/sw-clock.c
tools/perf/tests/task-exit.c
tools/perf/ui/stdio/hist.c
tools/perf/util/callchain.h
tools/perf/util/event.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/hist.h
tools/perf/util/python.c
tools/perf/util/scripting-engines/trace-event-python.c
virt/kvm/kvm_main.c

index 829df49dee99c6093d5586d0e2c19e34786dfa97..41ebbfebb3332f2bc7269a754ff96b377e17212b 100644 (file)
@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
 {
        char *end, buf[sizeof("nnnnn\0")];
+       size_t size;
        int tmp;
 
-       if (copy_from_user(buf, buffer, count))
+       size = min(count, sizeof(buf));
+       if (copy_from_user(buf, buffer, size))
                return -EFAULT;
 
        tmp = simple_strtol(buf, &end, 0);
index 0da5200ee79d149bdefe5aaddf917a536c85caa4..b3e18f800302ffee529e9aa99a22623118600042 100644 (file)
@@ -128,7 +128,8 @@ do {                                                        \
 do {                                                                   \
        typedef typeof(var) pao_T__;                                    \
        const int pao_ID__ = (__builtin_constant_p(val) &&              \
-                             ((val) == 1 || (val) == -1)) ? (val) : 0; \
+                             ((val) == 1 || (val) == -1)) ?            \
+                               (int)(val) : 0;                         \
        if (0) {                                                        \
                pao_T__ pao_tmp__;                                      \
                pao_tmp__ = (val);                                      \
index a0e2a8a80c94129bb3d3d47bf975ef68f98f500e..b2046e4d0b59e9b2b50720783b2aec4145012985 100644 (file)
@@ -609,7 +609,7 @@ static struct dentry *d_kvm_debug;
 
 struct dentry *kvm_init_debugfs(void)
 {
-       d_kvm_debug = debugfs_create_dir("kvm", NULL);
+       d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
        if (!d_kvm_debug)
                printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
 
index de1dfa18d0a11953c037af6c276603c20b1e346f..21dbe6bdb8edc661d729f656a02a5f1cf01a18c1 100644 (file)
@@ -1122,7 +1122,7 @@ ENDPROC(fast_syscall_spill_registers)
  * a3: exctable, original value in excsave1
  */
 
-fast_syscall_spill_registers_fixup:
+ENTRY(fast_syscall_spill_registers_fixup)
 
        rsr     a2, windowbase  # get current windowbase (a2 is saved)
        xsr     a0, depc        # restore depc and a0
@@ -1134,22 +1134,26 @@ fast_syscall_spill_registers_fixup:
         */
 
        xsr     a3, excsave1    # get spill-mask
-       slli    a2, a3, 1       # shift left by one
+       slli    a3, a3, 1       # shift left by one
 
-       slli    a3, a2, 32-WSBITS
-       src     a2, a2, a3      # a1 = xxwww1yyxxxwww1yy......
+       slli    a2, a3, 32-WSBITS
+       src     a2, a3, a2      # a2 = xxwww1yyxxxwww1yy......
        wsr     a2, windowstart # set corrected windowstart
 
-       rsr     a3, excsave1
-       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE   # restore a2
-       l32i    a3, a3, EXC_TABLE_PARAM # original WB (in user task)
+       srli    a3, a3, 1
+       rsr     a2, excsave1
+       l32i    a2, a2, EXC_TABLE_DOUBLE_SAVE   # restore a2
+       xsr     a2, excsave1
+       s32i    a3, a2, EXC_TABLE_DOUBLE_SAVE   # save a3
+       l32i    a3, a2, EXC_TABLE_PARAM # original WB (in user task)
+       xsr     a2, excsave1
 
        /* Return to the original (user task) WINDOWBASE.
         * We leave the following frame behind:
         * a0, a1, a2   same
-        * a3:          trashed (saved in excsave_1)
+        * a3:          trashed (saved in EXC_TABLE_DOUBLE_SAVE)
         * depc:        depc (we have to return to that address)
-        * excsave_1:   a3
+        * excsave_1:   exctable
         */
 
        wsr     a3, windowbase
@@ -1159,9 +1163,9 @@ fast_syscall_spill_registers_fixup:
         *  a0: return address
         *  a1: used, stack pointer
         *  a2: kernel stack pointer
-        *  a3: available, saved in EXCSAVE_1
+        *  a3: available
         *  depc: exception address
-        *  excsave: a3
+        *  excsave: exctable
         * Note: This frame might be the same as above.
         */
 
@@ -1181,9 +1185,12 @@ fast_syscall_spill_registers_fixup:
        rsr     a0, exccause
        addx4   a0, a0, a3                      # find entry in table
        l32i    a0, a0, EXC_TABLE_FAST_USER     # load handler
+       l32i    a3, a3, EXC_TABLE_DOUBLE_SAVE
        jx      a0
 
-fast_syscall_spill_registers_fixup_return:
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
 
        /* When we return here, all registers have been restored (a2: DEPC) */
 
@@ -1191,13 +1198,13 @@ fast_syscall_spill_registers_fixup_return:
 
        /* Restore fixup handler. */
 
-       xsr     a3, excsave1
-       movi    a2, fast_syscall_spill_registers_fixup
-       s32i    a2, a3, EXC_TABLE_FIXUP
-       s32i    a0, a3, EXC_TABLE_DOUBLE_SAVE
-       rsr     a2, windowbase
-       s32i    a2, a3, EXC_TABLE_PARAM
-       l32i    a2, a3, EXC_TABLE_KSTK
+       rsr     a2, excsave1
+       s32i    a3, a2, EXC_TABLE_DOUBLE_SAVE
+       movi    a3, fast_syscall_spill_registers_fixup
+       s32i    a3, a2, EXC_TABLE_FIXUP
+       rsr     a3, windowbase
+       s32i    a3, a2, EXC_TABLE_PARAM
+       l32i    a2, a2, EXC_TABLE_KSTK
 
        /* Load WB at the time the exception occurred. */
 
@@ -1206,8 +1213,12 @@ fast_syscall_spill_registers_fixup_return:
        wsr     a3, windowbase
        rsync
 
+       rsr     a3, excsave1
+       l32i    a3, a3, EXC_TABLE_DOUBLE_SAVE
+
        rfde
 
+ENDPROC(fast_syscall_spill_registers_fixup_return)
 
 /*
  * spill all registers.
index 718eca1850bd3533762aa64914d1a33e544d43ce..98b67d5f15144659dd9ea6954ab440e964a2aae8 100644 (file)
@@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 
        sp = regs->areg[1];
 
-       if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
+       if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
                sp = current->sas_ss_sp + current->sas_ss_size;
        }
 
index 56f88b7afe2fd907798cd41023133e11c37cf7c8..e9e1aad8c271048d4fde5bdac7ebef7e1cc79a96 100644 (file)
@@ -737,7 +737,8 @@ static int __init iss_net_setup(char *str)
                return 1;
        }
 
-       if ((new = alloc_bootmem(sizeof new)) == NULL) {
+       new = alloc_bootmem(sizeof(*new));
+       if (new == NULL) {
                printk("Alloc_bootmem failed\n");
                return 1;
        }
index 51410c2ac2cb617b9a98b9c0fd869e1ddd541870..4d978a3c88f743faac72af94b27cd72070507bf9 100644 (file)
  */
 
 #define SRC_CR                 0x00U
+#define SRC_CR_T0_ENSEL                BIT(15)
+#define SRC_CR_T1_ENSEL                BIT(17)
+#define SRC_CR_T2_ENSEL                BIT(19)
+#define SRC_CR_T3_ENSEL                BIT(21)
+#define SRC_CR_T4_ENSEL                BIT(23)
+#define SRC_CR_T5_ENSEL                BIT(25)
+#define SRC_CR_T6_ENSEL                BIT(27)
+#define SRC_CR_T7_ENSEL                BIT(29)
 #define SRC_XTALCR             0x0CU
 #define SRC_XTALCR_XTALTIMEN   BIT(20)
 #define SRC_XTALCR_SXTALDIS    BIT(19)
@@ -543,6 +551,19 @@ void __init nomadik_clk_init(void)
                       __func__, np->name);
                return;
        }
+
+       /* Set all timers to use the 2.4 MHz TIMCLK */
+       val = readl(src_base + SRC_CR);
+       val |= SRC_CR_T0_ENSEL;
+       val |= SRC_CR_T1_ENSEL;
+       val |= SRC_CR_T2_ENSEL;
+       val |= SRC_CR_T3_ENSEL;
+       val |= SRC_CR_T4_ENSEL;
+       val |= SRC_CR_T5_ENSEL;
+       val |= SRC_CR_T6_ENSEL;
+       val |= SRC_CR_T7_ENSEL;
+       writel(val, src_base + SRC_CR);
+
        val = readl(src_base + SRC_XTALCR);
        pr_info("SXTALO is %s\n",
                (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
index fc777bdc1886586d2f7e0e253ddc1e78758d4927..81a202d12a7ad89ab56909fce6782baa7fb94ec2 100644 (file)
@@ -39,8 +39,8 @@ static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = {
 };
 
 static const u32 a370_tclk_freqs[] __initconst = {
-       16600000,
-       20000000,
+       166000000,
+       200000000,
 };
 
 static u32 __init a370_get_tclk_freq(void __iomem *sar)
index 5bb848cac6ece40ab0e5bceef82d4facafdbd80d..81dd31a686df9e467b7c111f9c808ee568139551 100644 (file)
@@ -49,7 +49,7 @@
 #define SOCFPGA_L4_SP_CLK              "l4_sp_clk"
 #define SOCFPGA_NAND_CLK               "nand_clk"
 #define SOCFPGA_NAND_X_CLK             "nand_x_clk"
-#define SOCFPGA_MMC_CLK                        "mmc_clk"
+#define SOCFPGA_MMC_CLK                        "sdmmc_clk"
 #define SOCFPGA_DB_CLK                 "gpio_db_clk"
 
 #define div_mask(width)        ((1 << (width)) - 1)
index 67ccf4aa72773520baa89708c30737c3ad13c493..f5e4c21b301f6438c32d3552cae4bb9aef9d2a44 100644 (file)
@@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
 
        vco = icst_hz_to_vco(icst->params, rate);
        icst->rate = icst_hz(icst->params, vco);
-       vco_set(icst->vcoreg, icst->lockreg, vco);
+       vco_set(icst->lockreg, icst->vcoreg, vco);
        return 0;
 }
 
index 05ad9ba0a67e8ab4c9e79aade5b8ead441b7f3a8..fe58d0833a11c74f7b33b7ac5c169492f4a066fc 100644 (file)
@@ -61,7 +61,7 @@ static int drm_version(struct drm_device *dev, void *data,
 
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
index ea9022ef15d5bdffc40e5c0bf9a941c7f0b38677..10d1de5bce6ff7a35921fa19d1327863b85ab86d 100644 (file)
@@ -83,8 +83,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
        return true;
 }
 
-static void intel_crt_get_config(struct intel_encoder *encoder,
-                                struct intel_crtc_config *pipe_config)
+static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -102,7 +101,25 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
        else
                flags |= DRM_MODE_FLAG_NVSYNC;
 
-       pipe_config->adjusted_mode.flags |= flags;
+       return flags;
+}
+
+static void intel_crt_get_config(struct intel_encoder *encoder,
+                                struct intel_crtc_config *pipe_config)
+{
+       pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+}
+
+static void hsw_crt_get_config(struct intel_encoder *encoder,
+                              struct intel_crtc_config *pipe_config)
+{
+       intel_ddi_get_config(encoder, pipe_config);
+
+       pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+                                             DRM_MODE_FLAG_NHSYNC |
+                                             DRM_MODE_FLAG_PVSYNC |
+                                             DRM_MODE_FLAG_NVSYNC);
+       pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
 /* Note: The caller is required to filter out dpms modes not supported by the
@@ -799,7 +816,10 @@ void intel_crt_init(struct drm_device *dev)
        crt->base.mode_set = intel_crt_mode_set;
        crt->base.disable = intel_disable_crt;
        crt->base.enable = intel_enable_crt;
-       crt->base.get_config = intel_crt_get_config;
+       if (IS_HASWELL(dev))
+               crt->base.get_config = hsw_crt_get_config;
+       else
+               crt->base.get_config = intel_crt_get_config;
        if (I915_HAS_HOTPLUG(dev))
                crt->base.hpd_pin = HPD_CRT;
        if (HAS_DDI(dev))
index 63de2701b97403a82ffd221424d5b5b9acda5843..b53fff84a7d5b5521e1a34c6749d1f91a5af94e4 100644 (file)
@@ -1249,8 +1249,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
                intel_dp_check_link_status(intel_dp);
 }
 
-static void intel_ddi_get_config(struct intel_encoder *encoder,
-                                struct intel_crtc_config *pipe_config)
+void intel_ddi_get_config(struct intel_encoder *encoder,
+                         struct intel_crtc_config *pipe_config)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1268,6 +1268,23 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
                flags |= DRM_MODE_FLAG_NVSYNC;
 
        pipe_config->adjusted_mode.flags |= flags;
+
+       switch (temp & TRANS_DDI_BPC_MASK) {
+       case TRANS_DDI_BPC_6:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case TRANS_DDI_BPC_8:
+               pipe_config->pipe_bpp = 24;
+               break;
+       case TRANS_DDI_BPC_10:
+               pipe_config->pipe_bpp = 30;
+               break;
+       case TRANS_DDI_BPC_12:
+               pipe_config->pipe_bpp = 36;
+               break;
+       default:
+               break;
+       }
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
index 581fb4b2f76637694877a2c4401acd2f35ade58c..d78d33f9337d993472b22f82f3e2749d28a3e38e 100644 (file)
@@ -2327,9 +2327,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
                           FDI_FE_ERRC_ENABLE);
 }
 
-static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
+static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
 {
-       return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
+       return crtc->base.enabled && crtc->active &&
+               crtc->config.has_pch_encoder;
 }
 
 static void ivb_modeset_global_resources(struct drm_device *dev)
@@ -2979,6 +2980,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
                   I915_READ(VSYNCSHIFT(cpu_transcoder)));
 }
 
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t temp;
+
+       temp = I915_READ(SOUTH_CHICKEN1);
+       if (temp & FDI_BC_BIFURCATION_SELECT)
+               return;
+
+       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+       temp |= FDI_BC_BIFURCATION_SELECT;
+       DRM_DEBUG_KMS("enabling fdi C rx\n");
+       I915_WRITE(SOUTH_CHICKEN1, temp);
+       POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       switch (intel_crtc->pipe) {
+       case PIPE_A:
+               break;
+       case PIPE_B:
+               if (intel_crtc->config.fdi_lanes > 2)
+                       WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+               else
+                       cpt_enable_fdi_bc_bifurcation(dev);
+
+               break;
+       case PIPE_C:
+               cpt_enable_fdi_bc_bifurcation(dev);
+
+               break;
+       default:
+               BUG();
+       }
+}
+
 /*
  * Enable PCH resources required for PCH ports:
  *   - PCH PLLs
@@ -2997,6 +3040,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
        assert_pch_transcoder_disabled(dev_priv, pipe);
 
+       if (IS_IVYBRIDGE(dev))
+               ivybridge_update_fdi_bc_bifurcation(intel_crtc);
+
        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
        I915_WRITE(FDI_RX_TUSIZE1(pipe),
@@ -4983,6 +5029,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        if (!(tmp & PIPECONF_ENABLE))
                return false;
 
+       if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+               switch (tmp & PIPECONF_BPC_MASK) {
+               case PIPECONF_6BPC:
+                       pipe_config->pipe_bpp = 18;
+                       break;
+               case PIPECONF_8BPC:
+                       pipe_config->pipe_bpp = 24;
+                       break;
+               case PIPECONF_10BPC:
+                       pipe_config->pipe_bpp = 30;
+                       break;
+               default:
+                       break;
+               }
+       }
+
        intel_get_pipe_timings(crtc, pipe_config);
 
        i9xx_get_pfit_config(crtc, pipe_config);
@@ -5576,48 +5638,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
        return true;
 }
 
-static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t temp;
-
-       temp = I915_READ(SOUTH_CHICKEN1);
-       if (temp & FDI_BC_BIFURCATION_SELECT)
-               return;
-
-       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
-       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
-       temp |= FDI_BC_BIFURCATION_SELECT;
-       DRM_DEBUG_KMS("enabling fdi C rx\n");
-       I915_WRITE(SOUTH_CHICKEN1, temp);
-       POSTING_READ(SOUTH_CHICKEN1);
-}
-
-static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
-{
-       struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       switch (intel_crtc->pipe) {
-       case PIPE_A:
-               break;
-       case PIPE_B:
-               if (intel_crtc->config.fdi_lanes > 2)
-                       WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
-               else
-                       cpt_enable_fdi_bc_bifurcation(dev);
-
-               break;
-       case PIPE_C:
-               cpt_enable_fdi_bc_bifurcation(dev);
-
-               break;
-       default:
-               BUG();
-       }
-}
-
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
 {
        /*
@@ -5811,9 +5831,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                                             &intel_crtc->config.fdi_m_n);
        }
 
-       if (IS_IVYBRIDGE(dev))
-               ivybridge_update_fdi_bc_bifurcation(intel_crtc);
-
        ironlake_set_pipeconf(crtc);
 
        /* Set up the display plane register */
@@ -5881,6 +5898,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        if (!(tmp & PIPECONF_ENABLE))
                return false;
 
+       switch (tmp & PIPECONF_BPC_MASK) {
+       case PIPECONF_6BPC:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case PIPECONF_8BPC:
+               pipe_config->pipe_bpp = 24;
+               break;
+       case PIPECONF_10BPC:
+               pipe_config->pipe_bpp = 30;
+               break;
+       case PIPECONF_12BPC:
+               pipe_config->pipe_bpp = 36;
+               break;
+       default:
+               break;
+       }
+
        if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
                struct intel_shared_dpll *pll;
 
@@ -8612,6 +8646,9 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
+       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+               PIPE_CONF_CHECK_I(pipe_bpp);
+
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_FLAGS
index 2c555f91bfae076688fe07e1694c75932a9c97d2..1a431377d83b76ad22bccf795db770ab3b40bd3e 100644 (file)
@@ -1401,6 +1401,26 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
                else
                        pipe_config->port_clock = 270000;
        }
+
+       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+           pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+               /*
+                * This is a big fat ugly hack.
+                *
+                * Some machines in UEFI boot mode provide us a VBT that has 18
+                * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+                * unknown we fail to light up. Yet the same BIOS boots up with
+                * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+                * max, not what it tells us to use.
+                *
+                * Note: This will still be broken if the eDP panel is not lit
+                * up by the BIOS, and thus we can't get the mode at module
+                * load.
+                */
+               DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+                             pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+               dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+       }
 }
 
 static bool is_edp_psr(struct intel_dp *intel_dp)
index 9b7b68fd5d47cf6c979c8fdffd412d8381aabc0e..7f2b384ac939834fe8e024df14a3e47e4e82ad18 100644 (file)
@@ -765,6 +765,8 @@ extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
 extern bool
 intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
 extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+extern void intel_ddi_get_config(struct intel_encoder *encoder,
+                                struct intel_crtc_config *pipe_config);
 
 extern void intel_display_handle_reset(struct drm_device *dev);
 extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
index 831a5c021c4bdefd2495ca0736d8fcb3f78ff57d..b8af94a5be390610b360b5b27c3fe6e01d002081 100644 (file)
@@ -698,6 +698,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Intel D410PT",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+                       DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
+               },
+       },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Intel D425KT",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
+               },
+       },
        {
                .callback = intel_no_lvds_dmi_callback,
                .ident = "Intel D510MO",
index fe1de855775ecca1491ced31f797f5cac6d05abc..57fcc4b16a526d166fd6be21955a86d5ee7d87dc 100644 (file)
@@ -291,6 +291,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
        /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
 
        WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+              HDMI_ACR_SOURCE | /* select SW CTS value */
               HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
 
        evergreen_hdmi_update_ACR(encoder, mode->clock);
index 71399065db04d309104b98ede05b69226ad3b271..b41905573cd2a431862b3684f4d43ad445ecd15d 100644 (file)
@@ -2635,7 +2635,7 @@ int kv_dpm_init(struct radeon_device *rdev)
        pi->caps_sclk_ds = true;
        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
-       pi->bapm_enable = true;
+       pi->bapm_enable = false;
        pi->voltage_drop_t = 0;
        pi->caps_sclk_throttle_low_notification = false;
        pi->caps_fps = false; /* true? */
index a400ac1c414715423064874d37f047b426f7fe19..24f4960f59ee57be931f3f5301528a859469a016 100644 (file)
@@ -1272,8 +1272,8 @@ struct radeon_blacklist_clocks
 struct radeon_clock_and_voltage_limits {
        u32 sclk;
        u32 mclk;
-       u32 vddc;
-       u32 vddci;
+       u16 vddc;
+       u16 vddci;
 };
 
 struct radeon_clock_array {
index c0446992892533b24d3853d043cf088451db5662..e75d015024a15c7fb6eb3074299e234a69916594 100644 (file)
@@ -1734,6 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
  */
 struct input_dev *input_allocate_device(void)
 {
+       static atomic_t input_no = ATOMIC_INIT(0);
        struct input_dev *dev;
 
        dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1743,9 +1744,13 @@ struct input_dev *input_allocate_device(void)
                device_initialize(&dev->dev);
                mutex_init(&dev->mutex);
                spin_lock_init(&dev->event_lock);
+               init_timer(&dev->timer);
                INIT_LIST_HEAD(&dev->h_list);
                INIT_LIST_HEAD(&dev->node);
 
+               dev_set_name(&dev->dev, "input%ld",
+                            (unsigned long) atomic_inc_return(&input_no) - 1);
+
                __module_get(THIS_MODULE);
        }
 
@@ -2019,7 +2024,6 @@ static void devm_input_device_unregister(struct device *dev, void *res)
  */
 int input_register_device(struct input_dev *dev)
 {
-       static atomic_t input_no = ATOMIC_INIT(0);
        struct input_devres *devres = NULL;
        struct input_handler *handler;
        unsigned int packet_size;
@@ -2059,7 +2063,6 @@ int input_register_device(struct input_dev *dev)
         * If delay and period are pre-set by the driver, then autorepeating
         * is handled by the driver itself and we don't do it in input.c.
         */
-       init_timer(&dev->timer);
        if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
                dev->timer.data = (long) dev;
                dev->timer.function = input_repeat_key;
@@ -2073,9 +2076,6 @@ int input_register_device(struct input_dev *dev)
        if (!dev->setkeycode)
                dev->setkeycode = input_default_setkeycode;
 
-       dev_set_name(&dev->dev, "input%ld",
-                    (unsigned long) atomic_inc_return(&input_no) - 1);
-
        error = device_add(&dev->dev);
        if (error)
                goto err_free_vals;
index 134c3b404a54d22a5b7b425c22fc88ea4949cecf..a2e758d27584c0117a5c941d36b8042ab20f425f 100644 (file)
@@ -786,10 +786,17 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
        input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
        input_set_capability(input_dev, EV_MSC, MSC_SCAN);
 
-       if (pdata)
+       if (pdata) {
                error = pxa27x_keypad_build_keycode(keypad);
-       else
+       } else {
                error = pxa27x_keypad_build_keycode_from_dt(keypad);
+               /*
+                * Data that we get from DT resides in dynamically
+                * allocated memory so we need to update our pdata
+                * pointer.
+                */
+               pdata = keypad->pdata;
+       }
        if (error) {
                dev_err(&pdev->dev, "failed to build keycode\n");
                goto failed_put_clk;
index 082684e7f390f7d9f50201df3e5ddd325f2d0256..9365535ba7f157b98138f6f0defb14d9e1ae782a 100644 (file)
@@ -351,7 +351,9 @@ static void cm109_urb_irq_callback(struct urb *urb)
        if (status) {
                if (status == -ESHUTDOWN)
                        return;
-               dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+               dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+                                   __func__, status);
+               goto out;
        }
 
        /* Special keys */
@@ -418,8 +420,12 @@ static void cm109_urb_ctl_callback(struct urb *urb)
             dev->ctl_data->byte[2],
             dev->ctl_data->byte[3]);
 
-       if (status)
-               dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+       if (status) {
+               if (status == -ESHUTDOWN)
+                       return;
+               dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+                                   __func__, status);
+       }
 
        spin_lock(&dev->ctl_submit_lock);
 
@@ -427,7 +433,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
 
        if (likely(!dev->shutdown)) {
 
-               if (dev->buzzer_pending) {
+               if (dev->buzzer_pending || status) {
                        dev->buzzer_pending = 0;
                        dev->ctl_urb_pending = 1;
                        cm109_submit_buzz_toggle(dev);
index 7c5d72a6a26a3400fbedfcbe19e37e1abb298f1f..83658472ad2511735b2e02c8ee645858385d58f0 100644 (file)
@@ -103,6 +103,7 @@ static const struct alps_model_info alps_model_data[] = {
        /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
        { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
                ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
+       { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT },              /* Dell XT2 */
        { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },           /* Dell Vostro 1400 */
        { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
                ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },                            /* Toshiba Tecra A11-11L */
index 78e4de42efaacec53c29f740fafe490dead2af84..52c9ebf94729ff5bf6531cc7773902ece632d03b 100644 (file)
@@ -223,21 +223,26 @@ static int i8042_flush(void)
 {
        unsigned long flags;
        unsigned char data, str;
-       int i = 0;
+       int count = 0;
+       int retval = 0;
 
        spin_lock_irqsave(&i8042_lock, flags);
 
-       while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) {
-               udelay(50);
-               data = i8042_read_data();
-               i++;
-               dbg("%02x <- i8042 (flush, %s)\n",
-                   data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+       while ((str = i8042_read_status()) & I8042_STR_OBF) {
+               if (count++ < I8042_BUFFER_SIZE) {
+                       udelay(50);
+                       data = i8042_read_data();
+                       dbg("%02x <- i8042 (flush, %s)\n",
+                           data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+               } else {
+                       retval = -EIO;
+                       break;
+               }
        }
 
        spin_unlock_irqrestore(&i8042_lock, flags);
 
-       return i;
+       return retval;
 }
 
 /*
@@ -849,7 +854,7 @@ static int __init i8042_check_aux(void)
 
 static int i8042_controller_check(void)
 {
-       if (i8042_flush() == I8042_BUFFER_SIZE) {
+       if (i8042_flush()) {
                pr_err("No controller found\n");
                return -ENODEV;
        }
index 79b69ea47f747abd035d2eb44cad9d724699ddd7..e53416a4d7f3275ea2fd8f53f6a55dcbb6de02ad 100644 (file)
@@ -1031,6 +1031,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
 }
 
 static enum power_supply_property wacom_battery_props[] = {
+       POWER_SUPPLY_PROP_SCOPE,
        POWER_SUPPLY_PROP_CAPACITY
 };
 
@@ -1042,6 +1043,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
        int ret = 0;
 
        switch (psp) {
+               case POWER_SUPPLY_PROP_SCOPE:
+                       val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+                       break;
                case POWER_SUPPLY_PROP_CAPACITY:
                        val->intval =
                                wacom->wacom_wac.battery_capacity * 100 / 31;
index b2aa503c16b1fb08c7d94817c533794d6d9375bd..c59b797eeafaa4f6258552389437cb8d113e1642 100644 (file)
@@ -2054,6 +2054,12 @@ static const struct wacom_features wacom_features_0x101 =
 static const struct wacom_features wacom_features_0x10D =
        { "Wacom ISDv4 10D",      WACOM_PKGLEN_MTTPC,     26202, 16325,  255,
          0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10E =
+       { "Wacom ISDv4 10E",      WACOM_PKGLEN_MTTPC,     27760, 15694,  255,
+         0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10F =
+       { "Wacom ISDv4 10F",      WACOM_PKGLEN_MTTPC,     27760, 15694,  255,
+         0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x4001 =
        { "Wacom ISDv4 4001",      WACOM_PKGLEN_MTTPC,     26202, 16325,  255,
          0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2248,6 +2254,8 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x100) },
        { USB_DEVICE_WACOM(0x101) },
        { USB_DEVICE_WACOM(0x10D) },
+       { USB_DEVICE_WACOM(0x10E) },
+       { USB_DEVICE_WACOM(0x10F) },
        { USB_DEVICE_WACOM(0x300) },
        { USB_DEVICE_WACOM(0x301) },
        { USB_DEVICE_WACOM(0x304) },
index be12fbfcae1042e90c00d192bffe46c47821b6c4..1ea75236a15fcd800006612e9cac50c0f0e48f5a 100644 (file)
@@ -552,9 +552,8 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
        struct acpiphp_func *func;
        int max, pass;
        LIST_HEAD(add_list);
-       int nr_found;
 
-       nr_found = acpiphp_rescan_slot(slot);
+       acpiphp_rescan_slot(slot);
        max = acpiphp_max_busnr(bus);
        for (pass = 0; pass < 2; pass++) {
                list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -574,9 +573,6 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
                }
        }
        __pci_bus_assign_resources(bus, &add_list, NULL);
-       /* Nothing more to do here if there are no new devices on this bus. */
-       if (!nr_found && (slot->flags & SLOT_ENABLED))
-               return;
 
        acpiphp_sanitize_bus(bus);
        acpiphp_set_hpp_values(bus);
index 408a42ef787a32b6d8fece98c717376ce93c1270..f0d432c139d0cecedf51295562c858d22a9f44f0 100644 (file)
@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
 static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
        struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+       if (!capable(CAP_SYS_RAWIO))
+               return -EPERM;
        return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
 }
 
index 5cbc4bb1b395c6174e30fd0d8f73401408f526f9..df5e961484e108312f6f01765882dd4319d584f7 100644 (file)
@@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
 static int sg_add(struct device *, struct class_interface *);
 static void sg_remove(struct device *, struct class_interface *);
 
+static DEFINE_SPINLOCK(sg_open_exclusive_lock);
+
 static DEFINE_IDR(sg_index_idr);
-static DEFINE_RWLOCK(sg_index_lock);
+static DEFINE_RWLOCK(sg_index_lock);   /* Also used to lock
+                                                          file descriptor list for device */
 
 static struct class_interface sg_interface = {
        .add_dev        = sg_add,
@@ -143,7 +146,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
 } Sg_request;
 
 typedef struct sg_fd {         /* holds the state of a file descriptor */
-       struct list_head sfd_siblings; /* protected by sfd_lock of device */
+       /* sfd_siblings is protected by sg_index_lock */
+       struct list_head sfd_siblings;
        struct sg_device *parentdp;     /* owning device */
        wait_queue_head_t read_wait;    /* queue read until command done */
        rwlock_t rq_list_lock;  /* protect access to list in req_arr */
@@ -166,12 +170,13 @@ typedef struct sg_fd {            /* holds the state of a file descriptor */
 
 typedef struct sg_device { /* holds the state of each scsi generic device */
        struct scsi_device *device;
+       wait_queue_head_t o_excl_wait;  /* queue open() when O_EXCL in use */
        int sg_tablesize;       /* adapter's max scatter-gather table size */
        u32 index;              /* device index number */
-       spinlock_t sfd_lock;    /* protect file descriptor list for device */
+       /* sfds is protected by sg_index_lock */
        struct list_head sfds;
-       struct rw_semaphore o_sem;      /* exclude open should hold this rwsem */
        volatile char detached; /* 0->attached, 1->detached pending removal */
+       /* exclude protected by sg_open_exclusive_lock */
        char exclude;           /* opened for exclusive access */
        char sgdebug;           /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
        struct gendisk *disk;
@@ -220,14 +225,35 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
        return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
 }
 
+static int get_exclude(Sg_device *sdp)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+       ret = sdp->exclude;
+       spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+       return ret;
+}
+
+static int set_exclude(Sg_device *sdp, char val)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+       sdp->exclude = val;
+       spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+       return val;
+}
+
 static int sfds_list_empty(Sg_device *sdp)
 {
        unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&sdp->sfd_lock, flags);
+       read_lock_irqsave(&sg_index_lock, flags);
        ret = list_empty(&sdp->sfds);
-       spin_unlock_irqrestore(&sdp->sfd_lock, flags);
+       read_unlock_irqrestore(&sg_index_lock, flags);
        return ret;
 }
 
@@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp)
        struct request_queue *q;
        Sg_device *sdp;
        Sg_fd *sfp;
+       int res;
        int retval;
 
        nonseekable_open(inode, filp);
@@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp)
                goto error_out;
        }
 
-       if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) {
-               retval = -EPERM; /* Can't lock it with read only access */
-               goto error_out;
-       }
-       if (flags & O_NONBLOCK) {
-               if (flags & O_EXCL) {
-                       if (!down_write_trylock(&sdp->o_sem)) {
-                               retval = -EBUSY;
-                               goto error_out;
-                       }
-               } else {
-                       if (!down_read_trylock(&sdp->o_sem)) {
-                               retval = -EBUSY;
-                               goto error_out;
-                       }
+       if (flags & O_EXCL) {
+               if (O_RDONLY == (flags & O_ACCMODE)) {
+                       retval = -EPERM; /* Can't lock it with read only access */
+                       goto error_out;
+               }
+               if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
+                       retval = -EBUSY;
+                       goto error_out;
+               }
+               res = wait_event_interruptible(sdp->o_excl_wait,
+                                          ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
+               if (res) {
+                       retval = res;   /* -ERESTARTSYS because signal hit process */
+                       goto error_out;
+               }
+       } else if (get_exclude(sdp)) {  /* some other fd has an exclusive lock on dev */
+               if (flags & O_NONBLOCK) {
+                       retval = -EBUSY;
+                       goto error_out;
+               }
+               res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
+               if (res) {
+                       retval = res;   /* -ERESTARTSYS because signal hit process */
+                       goto error_out;
                }
-       } else {
-               if (flags & O_EXCL)
-                       down_write(&sdp->o_sem);
-               else
-                       down_read(&sdp->o_sem);
        }
-       /* Since write lock is held, no need to check sfd_list */
-       if (flags & O_EXCL)
-               sdp->exclude = 1;       /* used by release lock */
-
+       if (sdp->detached) {
+               retval = -ENODEV;
+               goto error_out;
+       }
        if (sfds_list_empty(sdp)) {     /* no existing opens on this device */
                sdp->sgdebug = 0;
                q = sdp->device->request_queue;
                sdp->sg_tablesize = queue_max_segments(q);
        }
-       sfp = sg_add_sfp(sdp, dev);
-       if (!IS_ERR(sfp))
+       if ((sfp = sg_add_sfp(sdp, dev)))
                filp->private_data = sfp;
-               /* retval is already provably zero at this point because of the
-                * check after retval = scsi_autopm_get_device(sdp->device))
-                */
        else {
-               retval = PTR_ERR(sfp);
-
                if (flags & O_EXCL) {
-                       sdp->exclude = 0;       /* undo if error */
-                       up_write(&sdp->o_sem);
-               } else
-                       up_read(&sdp->o_sem);
+                       set_exclude(sdp, 0);    /* undo if error */
+                       wake_up_interruptible(&sdp->o_excl_wait);
+               }
+               retval = -ENOMEM;
+               goto error_out;
+       }
+       retval = 0;
 error_out:
+       if (retval) {
                scsi_autopm_put_device(sdp->device);
 sdp_put:
                scsi_device_put(sdp->device);
@@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp)
 {
        Sg_device *sdp;
        Sg_fd *sfp;
-       int excl;
 
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
 
-       excl = sdp->exclude;
-       sdp->exclude = 0;
-       if (excl)
-               up_write(&sdp->o_sem);
-       else
-               up_read(&sdp->o_sem);
+       set_exclude(sdp, 0);
+       wake_up_interruptible(&sdp->o_excl_wait);
 
        scsi_autopm_put_device(sdp->device);
        kref_put(&sfp->f_ref, sg_remove_sfp);
@@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
        disk->first_minor = k;
        sdp->disk = disk;
        sdp->device = scsidp;
-       spin_lock_init(&sdp->sfd_lock);
        INIT_LIST_HEAD(&sdp->sfds);
-       init_rwsem(&sdp->o_sem);
+       init_waitqueue_head(&sdp->o_excl_wait);
        sdp->sg_tablesize = queue_max_segments(q);
        sdp->index = k;
        kref_init(&sdp->d_ref);
@@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
 
        /* Need a write lock to set sdp->detached. */
        write_lock_irqsave(&sg_index_lock, iflags);
-       spin_lock(&sdp->sfd_lock);
        sdp->detached = 1;
        list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
                wake_up_interruptible(&sfp->read_wait);
                kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
        }
-       spin_unlock(&sdp->sfd_lock);
        write_unlock_irqrestore(&sg_index_lock, iflags);
 
        sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2043,7 +2064,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
 
        sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
        if (!sfp)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        init_waitqueue_head(&sfp->read_wait);
        rwlock_init(&sfp->rq_list_lock);
@@ -2057,13 +2078,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
        sfp->cmd_q = SG_DEF_COMMAND_Q;
        sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
        sfp->parentdp = sdp;
-       spin_lock_irqsave(&sdp->sfd_lock, iflags);
-       if (sdp->detached) {
-               spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
-               return ERR_PTR(-ENODEV);
-       }
+       write_lock_irqsave(&sg_index_lock, iflags);
        list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
-       spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+       write_unlock_irqrestore(&sg_index_lock, iflags);
        SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
        if (unlikely(sg_big_buff != def_reserved_size))
                sg_big_buff = def_reserved_size;
@@ -2113,9 +2130,10 @@ static void sg_remove_sfp(struct kref *kref)
        struct sg_device *sdp = sfp->parentdp;
        unsigned long iflags;
 
-       spin_lock_irqsave(&sdp->sfd_lock, iflags);
+       write_lock_irqsave(&sg_index_lock, iflags);
        list_del(&sfp->sfd_siblings);
-       spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+       write_unlock_irqrestore(&sg_index_lock, iflags);
+       wake_up_interruptible(&sdp->o_excl_wait);
 
        INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
        schedule_work(&sfp->ew.work);
@@ -2502,7 +2520,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
        return 0;
 }
 
-/* must be called while holding sg_index_lock and sfd_lock */
+/* must be called while holding sg_index_lock */
 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 {
        int k, m, new_interface, blen, usg;
@@ -2587,26 +2605,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
 
        read_lock_irqsave(&sg_index_lock, iflags);
        sdp = it ? sg_lookup_dev(it->index) : NULL;
-       if (sdp) {
-               spin_lock(&sdp->sfd_lock);
-               if (!list_empty(&sdp->sfds)) {
-                       struct scsi_device *scsidp = sdp->device;
+       if (sdp && !list_empty(&sdp->sfds)) {
+               struct scsi_device *scsidp = sdp->device;
 
-                       seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
-                       if (sdp->detached)
-                               seq_printf(s, "detached pending close ");
-                       else
-                               seq_printf
-                                   (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
-                                    scsidp->host->host_no,
-                                    scsidp->channel, scsidp->id,
-                                    scsidp->lun,
-                                    scsidp->host->hostt->emulated);
-                       seq_printf(s, " sg_tablesize=%d excl=%d\n",
-                                  sdp->sg_tablesize, sdp->exclude);
-                       sg_proc_debug_helper(s, sdp);
-               }
-               spin_unlock(&sdp->sfd_lock);
+               seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+               if (sdp->detached)
+                       seq_printf(s, "detached pending close ");
+               else
+                       seq_printf
+                           (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
+                            scsidp->host->host_no,
+                            scsidp->channel, scsidp->id,
+                            scsidp->lun,
+                            scsidp->host->hostt->emulated);
+               seq_printf(s, " sg_tablesize=%d excl=%d\n",
+                          sdp->sg_tablesize, get_exclude(sdp));
+               sg_proc_debug_helper(s, sdp);
        }
        read_unlock_irqrestore(&sg_index_lock, iflags);
        return 0;
index f91bc1fdd895eda2c3e43c2553fef784d15f43c5..639ba96adb36b58e1c21b6ae037f084be512cdfc 100644 (file)
@@ -1960,6 +1960,7 @@ cntrlEnd:
 
                BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
 
+               memset(&DevInfo, 0, sizeof(DevInfo));
                DevInfo.MaxRDMBufferSize = BUFFER_4K;
                DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
                DevInfo.u32RxAlignmentCorrection = 0;
index 6ccb64fb07865821741d9c3406377bf97cfe7994..6ce0af9977d8c0ccc053e98eb818a98ec8d990f7 100644 (file)
@@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
        struct oz_app_hdr *app_hdr;
        struct oz_serial_ctx *ctx;
 
+       if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+               return -EINVAL;
+
        spin_lock_bh(&g_cdev.lock);
        pd = g_cdev.active_pd;
        if (pd)
index 23db32f07fd533ac4803b9a0a56815a5a7606eea..a10cdb17038bf9717bbb20f979fb104dda9ccb27 100644 (file)
@@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
 
 static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
 {
-       struct serial_icounter_struct icount;
+       struct serial_icounter_struct icount = {};
        struct sb_uart_icount cnow;
        struct sb_uart_port *port = state->port;
 
index c97e0e154d285d8b638dc5adb7a1bd4ac764d586..7e10dcdc3090085460918be8dfa9261df14eaa13 100644 (file)
@@ -570,6 +570,7 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
        ltv_t                   *pLtv;
        bool_t                  ltvAllocated = FALSE;
        ENCSTRCT                sEncryption;
+       size_t                  len;
 
 #ifdef USE_WDS
        hcf_16                  hcfPort  = HCF_PORT_0;
@@ -686,7 +687,8 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
                                        break;
                                case CFG_CNF_OWN_NAME:
                                        memset(lp->StationName, 0, sizeof(lp->StationName));
-                                       memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
+                                       len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
+                                       strlcpy(lp->StationName, &pLtv->u.u8[2], len);
                                        pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
                                        break;
                                case CFG_CNF_LOAD_BALANCING:
@@ -1783,6 +1785,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
 {
        struct wl_private *lp = wl_priv(dev);
        unsigned long flags;
+       size_t len;
        int         ret = 0;
        /*------------------------------------------------------------------------*/
 
@@ -1793,8 +1796,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
        wl_lock(lp, &flags);
 
        memset(lp->StationName, 0, sizeof(lp->StationName));
-
-       memcpy(lp->StationName, extra, wrqu->data.length);
+       len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
+       strlcpy(lp->StationName, extra, len);
 
        /* Commit the adapter parameters */
        wl_apply(lp);
index d067285a2d203765d38d36535d588854988f75c2..6b0f75eac8a26de2c6a3a83eac061ae7a03295cd 100644 (file)
@@ -1499,7 +1499,7 @@ static void atmel_set_ops(struct uart_port *port)
 /*
  * Get ip name usart or uart
  */
-static int atmel_get_ip_name(struct uart_port *port)
+static void atmel_get_ip_name(struct uart_port *port)
 {
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
        int name = UART_GET_IP_NAME(port);
@@ -1518,10 +1518,7 @@ static int atmel_get_ip_name(struct uart_port *port)
                atmel_port->is_usart = false;
        } else {
                dev_err(port->dev, "Not supported ip name, set to uart\n");
-               return -EINVAL;
        }
-
-       return 0;
 }
 
 /*
@@ -2405,9 +2402,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
        /*
         * Get port name of usart or uart
         */
-       ret = atmel_get_ip_name(&port->uart);
-       if (ret < 0)
-               goto err_add_port;
+       atmel_get_ip_name(&port->uart);
 
        return 0;
 
index ba475632c5fa2aea166efa3b03985139e071db2f..0e808cf91d97e94b6d973ca4c5937a2939206df8 100644 (file)
@@ -642,16 +642,29 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
 {
        struct uio_device *idev = vma->vm_private_data;
        int mi = uio_find_mem_index(vma);
+       struct uio_mem *mem;
        if (mi < 0)
                return -EINVAL;
+       mem = idev->info->mem + mi;
 
-       vma->vm_ops = &uio_physical_vm_ops;
+       if (vma->vm_end - vma->vm_start > mem->size)
+               return -EINVAL;
 
+       vma->vm_ops = &uio_physical_vm_ops;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
+       /*
+        * We cannot use the vm_iomap_memory() helper here,
+        * because vma->vm_pgoff is the map index we looked
+        * up above in uio_find_mem_index(), rather than an
+        * actual page offset into the mmap.
+        *
+        * So we just do the physical mmap without a page
+        * offset.
+        */
        return remap_pfn_range(vma,
                               vma->vm_start,
-                              idev->info->mem[mi].addr >> PAGE_SHIFT,
+                              mem->addr >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
 }
index a54ccdc4d6618bf4bdcfd5202f3e8489f22cec34..22ad85242e5b923d84eee490fc56e8ed0f29c3b7 100644 (file)
@@ -361,37 +361,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 {
        struct au1100fb_device *fbdev;
-       unsigned int len;
-       unsigned long start=0, off;
 
        fbdev = to_au1100fb_device(fbi);
 
-       if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-               return -EINVAL;
-       }
-
-       start = fbdev->fb_phys & PAGE_MASK;
-       len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-       off = vma->vm_pgoff << PAGE_SHIFT;
-
-       if ((vma->vm_end - vma->vm_start + off) > len) {
-               return -EINVAL;
-       }
-
-       off += start;
-       vma->vm_pgoff = off >> PAGE_SHIFT;
-
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
 
-       if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-                               vma->vm_end - vma->vm_start,
-                               vma->vm_page_prot)) {
-               return -EAGAIN;
-       }
-
-       return 0;
+       return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static struct fb_ops au1100fb_ops =
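
The open-coded offset and length validation removed here is what vm_iomap_memory() now performs on the driver's behalf before calling io_remap_pfn_range(). A simplified sketch of those checks (kernel-context code, not the exact mm/ implementation; the function name is made up for illustration):

    #include <linux/mm.h>

    /* Roughly what vm_iomap_memory(vma, start, len) validates before
     * handing off to io_remap_pfn_range(). */
    static int vm_iomap_memory_sketch(struct vm_area_struct *vma,
                                      phys_addr_t start, unsigned long len)
    {
            unsigned long pfn = start >> PAGE_SHIFT;
            unsigned long pages = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
            unsigned long vm_len = vma->vm_end - vma->vm_start;

            /* The requested window (pgoff + length) must fit in the region. */
            if (vma->vm_pgoff > pages)
                    return -EINVAL;
            pfn += vma->vm_pgoff;
            pages -= vma->vm_pgoff;
            if (vm_len >> PAGE_SHIFT > pages)
                    return -EINVAL;

            return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len,
                                      vma->vm_page_prot);
    }
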
index 301224ecc9507186e78e9cc14576e7a5316b227d..1d02897d17f27e91ace5622b309c730a8364187c 100644 (file)
@@ -1233,34 +1233,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
  * method mainly to allow the use of the TLB streaming flag (CCA=6)
  */
 static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-
 {
-       unsigned int len;
-       unsigned long start=0, off;
        struct au1200fb_device *fbdev = info->par;
 
-       if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-               return -EINVAL;
-       }
-
-       start = fbdev->fb_phys & PAGE_MASK;
-       len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-       off = vma->vm_pgoff << PAGE_SHIFT;
-
-       if ((vma->vm_end - vma->vm_start + off) > len) {
-               return -EINVAL;
-       }
-
-       off += start;
-       vma->vm_pgoff = off >> PAGE_SHIFT;
-
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
 
-       return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-                                 vma->vm_end - vma->vm_start,
-                                 vma->vm_page_prot);
+       return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
index 20532cb0b06e7ebfc6887e87fd3781e65eb41999..ae6ebb88ceff15ccef25f26804532d8b89cbe45a 100644 (file)
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(d_drop);
  * If ref is non-zero, then decrement the refcount too.
  * Returns dentry requiring refcount drop, or NULL if we're done.
  */
-static inline struct dentry *
+static struct dentry *
 dentry_kill(struct dentry *dentry, int unlock_on_failure)
        __releases(dentry->d_lock)
 {
@@ -630,7 +630,8 @@ repeat:
                        goto kill_it;
        }
 
-       dentry->d_flags |= DCACHE_REFERENCED;
+       if (!(dentry->d_flags & DCACHE_REFERENCED))
+               dentry->d_flags |= DCACHE_REFERENCED;
        dentry_lru_add(dentry);
 
        dentry->d_lockref.count--;
index 473e09da7d02d3396273221f3094824c91f3b030..810c28fb8c3c764dd5a3162ad3fd4239a4433a55 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
-#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1605,8 +1604,7 @@ fetch_events:
                        }
 
                        spin_unlock_irqrestore(&ep->lock, flags);
-                       if (!freezable_schedule_hrtimeout_range(to, slack,
-                                                               HRTIMER_MODE_ABS))
+                       if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
                                timed_out = 1;
 
                        spin_lock_irqsave(&ep->lock, flags);
index 35d4adc749d9616f4bb2a7cd6d952eb18b5579b4..dfd5cb18c0127faeea806a29ef090ee14affc87e 100644 (file)
@@ -238,8 +238,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 
        set_current_state(state);
        if (!pwq->triggered)
-               rc = freezable_schedule_hrtimeout_range(expires, slack,
-                                                       HRTIMER_MODE_ABS);
+               rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
        __set_current_state(TASK_RUNNING);
 
        /*
index cc88172c7d9a827f919592eba81275c052f9046b..c74088ab103b2aeb3e13497af1cd09db4f6f18fb 100644 (file)
@@ -332,7 +332,7 @@ do {                                                                        \
 #endif
 
 #ifndef this_cpu_sub
-# define this_cpu_sub(pcp, val)                this_cpu_add((pcp), -(val))
+# define this_cpu_sub(pcp, val)                this_cpu_add((pcp), -(typeof(pcp))(val))
 #endif
 
 #ifndef this_cpu_inc
@@ -418,7 +418,7 @@ do {                                                                        \
 # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
 #endif
 
-#define this_cpu_sub_return(pcp, val)  this_cpu_add_return(pcp, -(val))
+#define this_cpu_sub_return(pcp, val)  this_cpu_add_return(pcp, -(typeof(pcp))(val))
 #define this_cpu_inc_return(pcp)       this_cpu_add_return(pcp, 1)
 #define this_cpu_dec_return(pcp)       this_cpu_add_return(pcp, -1)
 
@@ -586,7 +586,7 @@ do {                                                                        \
 #endif
 
 #ifndef __this_cpu_sub
-# define __this_cpu_sub(pcp, val)      __this_cpu_add((pcp), -(val))
+# define __this_cpu_sub(pcp, val)      __this_cpu_add((pcp), -(typeof(pcp))(val))
 #endif
 
 #ifndef __this_cpu_inc
@@ -668,7 +668,7 @@ do {                                                                        \
        __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
 #endif
 
-#define __this_cpu_sub_return(pcp, val)        __this_cpu_add_return(pcp, -(val))
+#define __this_cpu_sub_return(pcp, val)        __this_cpu_add_return(pcp, -(typeof(pcp))(val))
 #define __this_cpu_inc_return(pcp)     __this_cpu_add_return(pcp, 1)
 #define __this_cpu_dec_return(pcp)     __this_cpu_add_return(pcp, -1)
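
The typeof(pcp) cast matters when val has an unsigned type narrower than the per-cpu variable: negating it in the unsigned type produces a huge positive addend instead of a subtraction once it is widened. A minimal userspace demonstration of the effect (illustrative only; assumes an LP64 system where long is 64-bit and unsigned int is 32-bit):

    #include <stdio.h>

    int main(void)
    {
            long counter = 100;
            unsigned int nr_pages = 3;

            /* -(nr_pages) wraps to 0xfffffffd as unsigned int, then
             * zero-extends to a large positive long. */
            long broken = counter + -(nr_pages);
            /* Casting to the destination type first, as the patched
             * macros now do with typeof(pcp), negates correctly. */
            long fixed  = counter + -(long)nr_pages;

            printf("broken: %ld\n", broken);  /* 4294967393, not 97 */
            printf("fixed:  %ld\n", fixed);   /* 97 */
            return 0;
    }

This is also why the memcontrol.c hunk below can switch back to __this_cpu_sub() for its unsigned nr_pages argument.
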
 
index 06344d986eb9dde61f341057f431d3183301f54e..094f3152ec2bf318dfe82797baede3b516cbd527 100644 (file)
@@ -983,7 +983,7 @@ config DEBUG_KOBJECT
 
 config DEBUG_KOBJECT_RELEASE
        bool "kobject release debugging"
-       depends on DEBUG_KERNEL
+       depends on DEBUG_OBJECTS_TIMERS
        help
          kobjects are reference counted objects.  This means that their
          last reference count put is not predictable, and the kobject can
index a685c8a79578b274cb361d442b953507e58d7d63..d16fa295ae1dbd0d43f1303b7796086cd968344c 100644 (file)
@@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
                miter->__offset += miter->consumed;
                miter->__remaining -= miter->consumed;
 
-               if (miter->__flags & SG_MITER_TO_SG)
+               if ((miter->__flags & SG_MITER_TO_SG) &&
+                   !PageSlab(miter->page))
                        flush_kernel_dcache_page(miter->page);
 
                if (miter->__flags & SG_MITER_ATOMIC) {
index 610e3df2768a6a5b2ec1e293da4c96dafbbe2d30..cca80d96e509efdbf1fcdfdbdb19e5d671d064d5 100644 (file)
@@ -1278,64 +1278,90 @@ out:
 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long addr, pmd_t pmd, pmd_t *pmdp)
 {
+       struct anon_vma *anon_vma = NULL;
        struct page *page;
        unsigned long haddr = addr & HPAGE_PMD_MASK;
+       int page_nid = -1, this_nid = numa_node_id();
        int target_nid;
-       int current_nid = -1;
-       bool migrated;
+       bool page_locked;
+       bool migrated = false;
 
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp)))
                goto out_unlock;
 
        page = pmd_page(pmd);
-       get_page(page);
-       current_nid = page_to_nid(page);
+       page_nid = page_to_nid(page);
        count_vm_numa_event(NUMA_HINT_FAULTS);
-       if (current_nid == numa_node_id())
+       if (page_nid == this_nid)
                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
+       /*
+        * Acquire the page lock to serialise THP migrations but avoid dropping
+        * page_table_lock if at all possible
+        */
+       page_locked = trylock_page(page);
        target_nid = mpol_misplaced(page, vma, haddr);
        if (target_nid == -1) {
-               put_page(page);
-               goto clear_pmdnuma;
+               /* If the page was locked, there are no parallel migrations */
+               if (page_locked)
+                       goto clear_pmdnuma;
+
+               /*
+                * Otherwise wait for potential migrations and retry. We do
+                * relock and check_same as the page may no longer be mapped.
+                * As the fault is being retried, do not account for it.
+                */
+               spin_unlock(&mm->page_table_lock);
+               wait_on_page_locked(page);
+               page_nid = -1;
+               goto out;
        }
 
-       /* Acquire the page lock to serialise THP migrations */
+       /* Page is misplaced, serialise migrations and parallel THP splits */
+       get_page(page);
        spin_unlock(&mm->page_table_lock);
-       lock_page(page);
+       if (!page_locked)
+               lock_page(page);
+       anon_vma = page_lock_anon_vma_read(page);
 
        /* Confirm the PTE did not change while locked */
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp))) {
                unlock_page(page);
                put_page(page);
+               page_nid = -1;
                goto out_unlock;
        }
-       spin_unlock(&mm->page_table_lock);
 
-       /* Migrate the THP to the requested node */
+       /*
+        * Migrate the THP to the requested node, returns with page unlocked
+        * and pmd_numa cleared.
+        */
+       spin_unlock(&mm->page_table_lock);
        migrated = migrate_misplaced_transhuge_page(mm, vma,
                                pmdp, pmd, addr, page, target_nid);
-       if (!migrated)
-               goto check_same;
-
-       task_numa_fault(target_nid, HPAGE_PMD_NR, true);
-       return 0;
+       if (migrated)
+               page_nid = target_nid;
 
-check_same:
-       spin_lock(&mm->page_table_lock);
-       if (unlikely(!pmd_same(pmd, *pmdp)))
-               goto out_unlock;
+       goto out;
 clear_pmdnuma:
+       BUG_ON(!PageLocked(page));
        pmd = pmd_mknonnuma(pmd);
        set_pmd_at(mm, haddr, pmdp, pmd);
        VM_BUG_ON(pmd_numa(*pmdp));
        update_mmu_cache_pmd(vma, addr, pmdp);
+       unlock_page(page);
 out_unlock:
        spin_unlock(&mm->page_table_lock);
-       if (current_nid != -1)
-               task_numa_fault(current_nid, HPAGE_PMD_NR, false);
+
+out:
+       if (anon_vma)
+               page_unlock_anon_vma_read(anon_vma);
+
+       if (page_nid != -1)
+               task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
+
        return 0;
 }
 
index 72467914b85640bb88730cb4a5c54113e531b35a..72f9decb0104f046cef155532200cd91e15fea58 100644 (file)
@@ -81,8 +81,9 @@ restart:
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
-               if (--(*nr_to_walk) == 0)
+               if (!*nr_to_walk)
                        break;
+               --*nr_to_walk;
 
                ret = isolate(item, &nlru->lock, cb_arg);
                switch (ret) {
index 34d3ca9572d6baed85499d099a0050af3b9bdf66..13b9d0f221b8460ae3c361dd51d22ae969e78325 100644 (file)
@@ -54,6 +54,7 @@
 #include <linux/page_cgroup.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
+#include <linux/lockdep.h>
 #include "internal.h"
 #include <net/sock.h>
 #include <net/ip.h>
@@ -2046,6 +2047,12 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
        return total;
 }
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map memcg_oom_lock_dep_map = {
+       .name = "memcg_oom_lock",
+};
+#endif
+
 static DEFINE_SPINLOCK(memcg_oom_lock);
 
 /*
@@ -2083,7 +2090,8 @@ static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
                        }
                        iter->oom_lock = false;
                }
-       }
+       } else
+               mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
 
        spin_unlock(&memcg_oom_lock);
 
@@ -2095,6 +2103,7 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
        struct mem_cgroup *iter;
 
        spin_lock(&memcg_oom_lock);
+       mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
        for_each_mem_cgroup_tree(iter, memcg)
                iter->oom_lock = false;
        spin_unlock(&memcg_oom_lock);
@@ -2765,10 +2774,10 @@ done:
        *ptr = memcg;
        return 0;
 nomem:
-       *ptr = NULL;
-       if (gfp_mask & __GFP_NOFAIL)
-               return 0;
-       return -ENOMEM;
+       if (!(gfp_mask & __GFP_NOFAIL)) {
+               *ptr = NULL;
+               return -ENOMEM;
+       }
 bypass:
        *ptr = root_mem_cgroup;
        return -EINTR;
@@ -3773,8 +3782,7 @@ void mem_cgroup_move_account_page_stat(struct mem_cgroup *from,
 {
        /* Update stat data for mem_cgroup */
        preempt_disable();
-       WARN_ON_ONCE(from->stat->count[idx] < nr_pages);
-       __this_cpu_add(from->stat->count[idx], -nr_pages);
+       __this_cpu_sub(from->stat->count[idx], nr_pages);
        __this_cpu_add(to->stat->count[idx], nr_pages);
        preempt_enable();
 }
@@ -4950,31 +4958,18 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
        } while (usage > 0);
 }
 
-/*
- * This mainly exists for tests during the setting of set of use_hierarchy.
- * Since this is the very setting we are changing, the current hierarchy value
- * is meaningless
- */
-static inline bool __memcg_has_children(struct mem_cgroup *memcg)
-{
-       struct cgroup_subsys_state *pos;
-
-       /* bounce at first found */
-       css_for_each_child(pos, &memcg->css)
-               return true;
-       return false;
-}
-
-/*
- * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
- * to be already dead (as in mem_cgroup_force_empty, for instance).  This is
- * from mem_cgroup_count_children(), in the sense that we don't really care how
- * many children we have; we only need to know if we have any.  It also counts
- * any memcg without hierarchy as infertile.
- */
 static inline bool memcg_has_children(struct mem_cgroup *memcg)
 {
-       return memcg->use_hierarchy && __memcg_has_children(memcg);
+       lockdep_assert_held(&memcg_create_mutex);
+       /*
+        * The lock does not prevent addition or deletion to the list
+        * of children, but it prevents a new child from being
+        * initialized based on this parent in css_online(), so it's
+        * enough to decide whether hierarchically inherited
+        * attributes can still be changed or not.
+        */
+       return memcg->use_hierarchy &&
+               !list_empty(&memcg->css.cgroup->children);
 }
 
 /*
@@ -5054,7 +5049,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
         */
        if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
                                (val == 1 || val == 0)) {
-               if (!__memcg_has_children(memcg))
+               if (list_empty(&memcg->css.cgroup->children))
                        memcg->use_hierarchy = val;
                else
                        retval = -EBUSY;
index 1311f26497e6a0f776682ed8a7e23b8620a5b9ad..d176154c243f2ffad8cf4b1b39f6f30d17c97733 100644 (file)
@@ -3521,12 +3521,12 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
-                               unsigned long addr, int current_nid)
+                               unsigned long addr, int page_nid)
 {
        get_page(page);
 
        count_vm_numa_event(NUMA_HINT_FAULTS);
-       if (current_nid == numa_node_id())
+       if (page_nid == numa_node_id())
                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
        return mpol_misplaced(page, vma, addr);
@@ -3537,7 +3537,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct page *page = NULL;
        spinlock_t *ptl;
-       int current_nid = -1;
+       int page_nid = -1;
        int target_nid;
        bool migrated = false;
 
@@ -3567,15 +3567,10 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                return 0;
        }
 
-       current_nid = page_to_nid(page);
-       target_nid = numa_migrate_prep(page, vma, addr, current_nid);
+       page_nid = page_to_nid(page);
+       target_nid = numa_migrate_prep(page, vma, addr, page_nid);
        pte_unmap_unlock(ptep, ptl);
        if (target_nid == -1) {
-               /*
-                * Account for the fault against the current node if it not
-                * being replaced regardless of where the page is located.
-                */
-               current_nid = numa_node_id();
                put_page(page);
                goto out;
        }
@@ -3583,11 +3578,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /* Migrate to the requested node */
        migrated = migrate_misplaced_page(page, target_nid);
        if (migrated)
-               current_nid = target_nid;
+               page_nid = target_nid;
 
 out:
-       if (current_nid != -1)
-               task_numa_fault(current_nid, 1, migrated);
+       if (page_nid != -1)
+               task_numa_fault(page_nid, 1, migrated);
        return 0;
 }
 
@@ -3602,7 +3597,6 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long offset;
        spinlock_t *ptl;
        bool numa = false;
-       int local_nid = numa_node_id();
 
        spin_lock(&mm->page_table_lock);
        pmd = *pmdp;
@@ -3625,9 +3619,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
                pte_t pteval = *pte;
                struct page *page;
-               int curr_nid = local_nid;
+               int page_nid = -1;
                int target_nid;
-               bool migrated;
+               bool migrated = false;
+
                if (!pte_present(pteval))
                        continue;
                if (!pte_numa(pteval))
@@ -3649,25 +3644,19 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                if (unlikely(page_mapcount(page) != 1))
                        continue;
 
-               /*
-                * Note that the NUMA fault is later accounted to either
-                * the node that is currently running or where the page is
-                * migrated to.
-                */
-               curr_nid = local_nid;
-               target_nid = numa_migrate_prep(page, vma, addr,
-                                              page_to_nid(page));
-               if (target_nid == -1) {
+               page_nid = page_to_nid(page);
+               target_nid = numa_migrate_prep(page, vma, addr, page_nid);
+               pte_unmap_unlock(pte, ptl);
+               if (target_nid != -1) {
+                       migrated = migrate_misplaced_page(page, target_nid);
+                       if (migrated)
+                               page_nid = target_nid;
+               } else {
                        put_page(page);
-                       continue;
                }
 
-               /* Migrate to the requested node */
-               pte_unmap_unlock(pte, ptl);
-               migrated = migrate_misplaced_page(page, target_nid);
-               if (migrated)
-                       curr_nid = target_nid;
-               task_numa_fault(curr_nid, 1, migrated);
+               if (page_nid != -1)
+                       task_numa_fault(page_nid, 1, migrated);
 
                pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
        }
index 7a7325ee1d089696a8073a84d2f748f326124805..c04692774e88bee81f10c2ac85e9cf2752e32237 100644 (file)
@@ -1715,12 +1715,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                unlock_page(new_page);
                put_page(new_page);             /* Free it */
 
-               unlock_page(page);
+               /* Retake the caller's reference and put the page back on the LRU */
+               get_page(page);
                putback_lru_page(page);
-
-               count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-               isolated = 0;
-               goto out;
+               mod_zone_page_state(page_zone(page),
+                        NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
+               goto out_fail;
        }
 
        /*
@@ -1737,9 +1737,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
        entry = pmd_mkhuge(entry);
 
-       page_add_new_anon_rmap(new_page, vma, haddr);
-
+       pmdp_clear_flush(vma, haddr, pmd);
        set_pmd_at(mm, haddr, pmd, entry);
+       page_add_new_anon_rmap(new_page, vma, haddr);
        update_mmu_cache_pmd(vma, address, &entry);
        page_remove_rmap(page);
        /*
@@ -1758,7 +1758,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
        count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
 
-out:
        mod_zone_page_state(page_zone(page),
                        NR_ISOLATED_ANON + page_lru,
                        -HPAGE_PMD_NR);
@@ -1767,6 +1766,10 @@ out:
 out_fail:
        count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 out_dropref:
+       entry = pmd_mknonnuma(entry);
+       set_pmd_at(mm, haddr, pmd, entry);
+       update_mmu_cache_pmd(vma, address, &entry);
+
        unlock_page(page);
        put_page(page);
        return 0;
index a3af058f68e4d9f434337d0dcd6127d5d8c6d039..412ba2b7326a2898d25e4b0a43b3dfd921a389a1 100644 (file)
@@ -148,7 +148,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                                split_huge_page_pmd(vma, addr, pmd);
                        else if (change_huge_pmd(vma, pmd, addr, newprot,
                                                 prot_numa)) {
-                               pages += HPAGE_PMD_NR;
+                               pages++;
                                continue;
                        }
                        /* fall through */
index 5da2cbcfdbb56b0e9f4fe27d6e04137e59dfce3b..2beeabf502c50f1ff3069981bd5b3a84b7463b6f 100644 (file)
@@ -242,7 +242,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
                if (err)
                        break;
                pgd++;
-       } while (addr = next, addr != end);
+       } while (addr = next, addr < end);
 
        return err;
 }
index e297b74471b8a32e76912342f50f77534360ec87..ca0d3d9f4bac831d377fd90f9a38be1021c486ec 100644 (file)
@@ -90,8 +90,20 @@ OPTIONS
        Number of mmap data pages. Must be a power of two.
 
 -g::
+       Enables call-graph (stack chain/backtrace) recording.
+
 --call-graph::
-       Do call-graph (stack chain/backtrace) recording.
+       Set up and enable call-graph (stack chain/backtrace) recording;
+       implies -g.
+
+       Allows specifying "fp" (frame pointer) or "dwarf"
+       (DWARF's CFI - Call Frame Information) as the method to collect
+       the information used to show the call graphs.
+
+       On some systems, where binaries are built with gcc
+       -fomit-frame-pointer, using the "fp" method will produce bogus
+       call graphs; in that case "dwarf", if available (perf tools
+       linked to the libunwind library), should be used instead.
 
 -q::
 --quiet::
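
After this split, -g takes no argument and simply enables frame-pointer call graphs, while --call-graph selects the collection method. Illustrative invocations (the workload name is a placeholder):

    perf record -g -- ./workload                    # frame-pointer unwinding
    perf record --call-graph dwarf -- ./workload    # DWARF/libunwind unwinding
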
index 58d6598a968679fb4c020f9d932443a57f9feee8..6a118e71d0032e15d3ce8d55b91c24a1be83e31f 100644 (file)
@@ -140,20 +140,12 @@ Default is to monitor all CPUS.
 --asm-raw::
        Show raw instruction encoding of assembly instructions.
 
--G [type,min,order]::
+-G::
+       Enables call-graph (stack chain/backtrace) recording.
+
 --call-graph::
-        Display call chains using type, min percent threshold and order.
-       type can be either:
-       - flat: single column, linear exposure of call chains.
-       - graph: use a graph tree, displaying absolute overhead rates.
-       - fractal: like graph, but displays relative rates. Each branch of
-                the tree is considered as a new profiled object.
-
-       order can be either:
-       - callee: callee based call graph.
-       - caller: inverted caller based call graph.
-
-       Default: fractal,0.5,callee.
+       Set up and enable call-graph (stack chain/backtrace) recording;
+       implies -G.
 
 --ignore-callees=<regex>::
         Ignore callees of the function(s) matching the given regex.
index 935d52216c899eb42774ae9f6e63767ff77fc8ba..fbc2888d64950040d3f56444d11bc0e828396b31 100644 (file)
@@ -888,11 +888,18 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
        while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
                err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
                if (err) {
+                       perf_evlist__mmap_consume(kvm->evlist, idx);
                        pr_err("Failed to parse sample\n");
                        return -1;
                }
 
                err = perf_session_queue_event(kvm->session, event, &sample, 0);
+               /*
+                * FIXME: Here we can't consume the event, as perf_session_queue_event will
+                *        point to it, and it may get overwritten by the kernel.
+                */
+               perf_evlist__mmap_consume(kvm->evlist, idx);
+
                if (err) {
                        pr_err("Failed to enqueue sample: %d\n", err);
                        return -1;
index a41ac41546c962df3e0801b230f06b4aa9730c7a..d0465148464022c82e5fcfb431513624edf34740 100644 (file)
@@ -712,21 +712,12 @@ static int get_stack_size(char *str, unsigned long *_size)
 }
 #endif /* LIBUNWIND_SUPPORT */
 
-int record_parse_callchain_opt(const struct option *opt,
-                              const char *arg, int unset)
+int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
 {
-       struct perf_record_opts *opts = opt->value;
        char *tok, *name, *saveptr = NULL;
        char *buf;
        int ret = -1;
 
-       /* --no-call-graph */
-       if (unset)
-               return 0;
-
-       /* We specified default option if none is provided. */
-       BUG_ON(!arg);
-
        /* We need buffer that we know we can write to. */
        buf = malloc(strlen(arg) + 1);
        if (!buf)
@@ -764,13 +755,9 @@ int record_parse_callchain_opt(const struct option *opt,
                                ret = get_stack_size(tok, &size);
                                opts->stack_dump_size = size;
                        }
-
-                       if (!ret)
-                               pr_debug("callchain: stack dump size %d\n",
-                                        opts->stack_dump_size);
 #endif /* LIBUNWIND_SUPPORT */
                } else {
-                       pr_err("callchain: Unknown -g option "
+                       pr_err("callchain: Unknown --call-graph option "
                               "value: %s\n", arg);
                        break;
                }
@@ -778,13 +765,52 @@ int record_parse_callchain_opt(const struct option *opt,
        } while (0);
 
        free(buf);
+       return ret;
+}
+
+static void callchain_debug(struct perf_record_opts *opts)
+{
+       pr_debug("callchain: type %d\n", opts->call_graph);
 
+       if (opts->call_graph == CALLCHAIN_DWARF)
+               pr_debug("callchain: stack dump size %d\n",
+                        opts->stack_dump_size);
+}
+
+int record_parse_callchain_opt(const struct option *opt,
+                              const char *arg,
+                              int unset)
+{
+       struct perf_record_opts *opts = opt->value;
+       int ret;
+
+       /* --no-call-graph */
+       if (unset) {
+               opts->call_graph = CALLCHAIN_NONE;
+               pr_debug("callchain: disabled\n");
+               return 0;
+       }
+
+       ret = record_parse_callchain(arg, opts);
        if (!ret)
-               pr_debug("callchain: type %d\n", opts->call_graph);
+               callchain_debug(opts);
 
        return ret;
 }
 
+int record_callchain_opt(const struct option *opt,
+                        const char *arg __maybe_unused,
+                        int unset __maybe_unused)
+{
+       struct perf_record_opts *opts = opt->value;
+
+       if (opts->call_graph == CALLCHAIN_NONE)
+               opts->call_graph = CALLCHAIN_FP;
+
+       callchain_debug(opts);
+       return 0;
+}
+
 static const char * const record_usage[] = {
        "perf record [<options>] [<command>]",
        "perf record [<options>] -- <command> [<options>]",
@@ -813,12 +839,12 @@ static struct perf_record record = {
        },
 };
 
-#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
+#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
 
 #ifdef LIBUNWIND_SUPPORT
-const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
+const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
 #else
-const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
+const char record_callchain_help[] = CALLCHAIN_HELP "fp";
 #endif
 
 /*
@@ -858,9 +884,12 @@ const struct option record_options[] = {
                     "number of mmap data pages"),
        OPT_BOOLEAN(0, "group", &record.opts.group,
                    "put the counters into a counter group"),
-       OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
-                            "mode[,dump_size]", record_callchain_help,
-                            &record_parse_callchain_opt, "fp"),
+       OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
+                          NULL, "enables call-graph recording" ,
+                          &record_callchain_opt),
+       OPT_CALLBACK(0, "call-graph", &record.opts,
+                    "mode[,dump_size]", record_callchain_help,
+                    &record_parse_callchain_opt),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
index 212214162bb2820286694417035e470e1d465e76..5a11f13e56f9f186cc7aa0926f4ee63008327358 100644 (file)
@@ -810,7 +810,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
                ret = perf_evlist__parse_sample(top->evlist, event, &sample);
                if (ret) {
                        pr_err("Can't parse sample, err = %d\n", ret);
-                       continue;
+                       goto next_event;
                }
 
                evsel = perf_evlist__id2evsel(session->evlist, sample.id);
@@ -825,13 +825,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
                case PERF_RECORD_MISC_USER:
                        ++top->us_samples;
                        if (top->hide_user_symbols)
-                               continue;
+                               goto next_event;
                        machine = &session->machines.host;
                        break;
                case PERF_RECORD_MISC_KERNEL:
                        ++top->kernel_samples;
                        if (top->hide_kernel_symbols)
-                               continue;
+                               goto next_event;
                        machine = &session->machines.host;
                        break;
                case PERF_RECORD_MISC_GUEST_KERNEL:
@@ -847,7 +847,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
                         */
                        /* Fall thru */
                default:
-                       continue;
+                       goto next_event;
                }
 
 
@@ -859,6 +859,8 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
                        machine__process_event(machine, event);
                } else
                        ++session->stats.nr_unknown_events;
+next_event:
+               perf_evlist__mmap_consume(top->evlist, idx);
        }
 }
 
@@ -1016,16 +1018,16 @@ out_delete:
 }
 
 static int
-parse_callchain_opt(const struct option *opt, const char *arg, int unset)
+callchain_opt(const struct option *opt, const char *arg, int unset)
 {
-       /*
-        * --no-call-graph
-        */
-       if (unset)
-               return 0;
-
        symbol_conf.use_callchain = true;
+       return record_callchain_opt(opt, arg, unset);
+}
 
+static int
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
+{
+       symbol_conf.use_callchain = true;
        return record_parse_callchain_opt(opt, arg, unset);
 }
 
@@ -1106,9 +1108,12 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
                   "sort by key(s): pid, comm, dso, symbol, parent, weight, local_weight"),
        OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
                    "Show a column with the number of samples"),
-       OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts,
-                            "mode[,dump_size]", record_callchain_help,
-                            &parse_callchain_opt, "fp"),
+       OPT_CALLBACK_NOOPT('G', NULL, &top.record_opts,
+                          NULL, "enables call-graph recording",
+                          &callchain_opt),
+       OPT_CALLBACK(0, "call-graph", &top.record_opts,
+                    "mode[,dump_size]", record_callchain_help,
+                    &parse_callchain_opt),
        OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
                   "ignore callees of these functions in call graphs",
                   report_parse_ignore_callees_opt),
index 71aa3e35406bd064e87044d2ae71597a5f117092..99c8d9ad6729cabf6bebe5b65c753af1a28d9024 100644 (file)
@@ -987,7 +987,7 @@ again:
                        err = perf_evlist__parse_sample(evlist, event, &sample);
                        if (err) {
                                fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
-                               continue;
+                               goto next_event;
                        }
 
                        if (trace->base_time == 0)
@@ -1001,18 +1001,20 @@ again:
                        evsel = perf_evlist__id2evsel(evlist, sample.id);
                        if (evsel == NULL) {
                                fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
-                               continue;
+                               goto next_event;
                        }
 
                        if (sample.raw_data == NULL) {
                                fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
                                       perf_evsel__name(evsel), sample.tid,
                                       sample.cpu, sample.raw_size);
-                               continue;
+                               goto next_event;
                        }
 
                        handler = evsel->handler.func;
                        handler(trace, evsel, &sample);
+next_event:
+                       perf_evlist__mmap_consume(evlist, i);
 
                        if (done)
                                goto out_unmap_evlist;
index 6fb781d5586cd1d264bc223375f081e0b0ed0c82..e3fedfa2906e5912d6dac6da9ef73daa53820b6d 100644 (file)
@@ -290,6 +290,7 @@ static int process_events(struct machine *machine, struct perf_evlist *evlist,
        for (i = 0; i < evlist->nr_mmaps; i++) {
                while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                        ret = process_event(machine, evlist, event, state);
+                       perf_evlist__mmap_consume(evlist, i);
                        if (ret < 0)
                                return ret;
                }
index d444ea2c47d9d03d5bacb01a35ac9288ca5971df..376c35608534a41dce87354c1741e5b29fbdd612 100644 (file)
@@ -36,6 +36,7 @@ static int find_comm(struct perf_evlist *evlist, const char *comm)
                            (pid_t)event->comm.tid == getpid() &&
                            strcmp(event->comm.comm, comm) == 0)
                                found += 1;
+                       perf_evlist__mmap_consume(evlist, i);
                }
        }
        return found;
index c4185b9aeb80e217e4e489ef3eb49d0035557f84..a7232c204eb94303173fad65a13194af233d118d 100644 (file)
@@ -122,6 +122,7 @@ int test__basic_mmap(void)
                        goto out_munmap;
                }
                nr_events[evsel->idx]++;
+               perf_evlist__mmap_consume(evlist, 0);
        }
 
        err = 0;
index fc5b9fca8b47421e13b4fe1c38e07f267be5ade1..524b221b829bebd1a9b6f97109e9e6a78c20ef48 100644 (file)
@@ -77,8 +77,10 @@ int test__syscall_open_tp_fields(void)
 
                                ++nr_events;
 
-                               if (type != PERF_RECORD_SAMPLE)
+                               if (type != PERF_RECORD_SAMPLE) {
+                                       perf_evlist__mmap_consume(evlist, i);
                                        continue;
+                               }
 
                                err = perf_evsel__parse_sample(evsel, event, &sample);
                                if (err) {
index b8a7056519ac7c29b430a87d73be85314b4d8082..7923b06ffc9198adde790901c8709dd8e66fa0b7 100644 (file)
@@ -263,6 +263,8 @@ int test__PERF_RECORD(void)
                                                 type);
                                        ++errs;
                                }
+
+                               perf_evlist__mmap_consume(evlist, i);
                        }
                }
 
index 0ab61b1f408ecfc71680b00b8f0e7f0ba6911b0d..4ca1b938f6a620380f8cae76435f8ad6b142eecd 100644 (file)
@@ -122,7 +122,7 @@ int test__perf_time_to_tsc(void)
                        if (event->header.type != PERF_RECORD_COMM ||
                            (pid_t)event->comm.pid != getpid() ||
                            (pid_t)event->comm.tid != getpid())
-                               continue;
+                               goto next_event;
 
                        if (strcmp(event->comm.comm, comm1) == 0) {
                                CHECK__(perf_evsel__parse_sample(evsel, event,
@@ -134,6 +134,8 @@ int test__perf_time_to_tsc(void)
                                                                 &sample));
                                comm2_time = sample.time;
                        }
+next_event:
+                       perf_evlist__mmap_consume(evlist, i);
                }
        }
 
index 2e41e2d32ccc48d3946086dbf3d734d736e3193f..6e2b44ec07495b716a45f649206f5088b2aea51f 100644 (file)
@@ -78,7 +78,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
                struct perf_sample sample;
 
                if (event->header.type != PERF_RECORD_SAMPLE)
-                       continue;
+                       goto next_event;
 
                err = perf_evlist__parse_sample(evlist, event, &sample);
                if (err < 0) {
@@ -88,6 +88,8 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 
                total_periods += sample.period;
                nr_samples++;
+next_event:
+               perf_evlist__mmap_consume(evlist, 0);
        }
 
        if ((u64) nr_samples == total_periods) {
index 28fe5894b0618901493100ea0995c8dda093b9ec..a3e64876e940020d93aa674ea064d735bf688713 100644 (file)
@@ -96,10 +96,10 @@ int test__task_exit(void)
 
 retry:
        while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
-               if (event->header.type != PERF_RECORD_EXIT)
-                       continue;
+               if (event->header.type == PERF_RECORD_EXIT)
+                       nr_exit++;
 
-               nr_exit++;
+               perf_evlist__mmap_consume(evlist, 0);
        }
 
        if (!exited || !nr_exit) {
index 194e2f42ff5d1335eb1c2b5b53ea2ab6b1604932..6c152686e83763a9b18983d06f52e2a14b739b91 100644 (file)
@@ -315,8 +315,7 @@ static inline void advance_hpp(struct perf_hpp *hpp, int inc)
 }
 
 static int hist_entry__period_snprintf(struct perf_hpp *hpp,
-                                      struct hist_entry *he,
-                                      bool color)
+                                      struct hist_entry *he)
 {
        const char *sep = symbol_conf.field_sep;
        struct perf_hpp_fmt *fmt;
@@ -338,7 +337,7 @@ static int hist_entry__period_snprintf(struct perf_hpp *hpp,
                } else
                        first = false;
 
-               if (color && fmt->color)
+               if (perf_hpp__use_color() && fmt->color)
                        ret = fmt->color(fmt, hpp, he);
                else
                        ret = fmt->entry(fmt, hpp, he);
@@ -358,12 +357,11 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
                .buf            = bf,
                .size           = size,
        };
-       bool color = !symbol_conf.field_sep;
 
        if (size == 0 || size > bfsz)
                size = hpp.size = bfsz;
 
-       ret = hist_entry__period_snprintf(&hpp, he, color);
+       ret = hist_entry__period_snprintf(&hpp, he);
        hist_entry__sort_snprintf(he, bf + ret, size - ret, hists);
 
        ret = fprintf(fp, "%s\n", bf);
@@ -482,6 +480,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
 
 print_entries:
        linesz = hists__sort_list_width(hists) + 3 + 1;
+       linesz += perf_hpp__color_overhead();
        line = malloc(linesz);
        if (line == NULL) {
                ret = -1;
index 2b585bc308cff6f5f7cdfa1685c469007b0db3b5..9e99060408ae759e39401f686300090b76e52edf 100644 (file)
@@ -147,6 +147,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
 
 struct option;
 
+int record_parse_callchain(const char *arg, struct perf_record_opts *opts);
 int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
+int record_callchain_opt(const struct option *opt, const char *arg, int unset);
+
 extern const char record_callchain_help[];
 #endif /* __PERF_CALLCHAIN_H */
index 63df031fc9c73868160305b7875d6e20eded09df..49096ea58a15dec22dc1e20b0de5d69bf2c2ac3a 100644 (file)
@@ -213,7 +213,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                       &event->mmap.pgoff,
                       execname);
 
-               if (n != 8)
+               if (n != 5)
                        continue;
 
                if (prot[2] != 'x')
index f9f77bee0b1b416414fa3561fb2eee4785e502c4..e584cd30b0f2dca4866919e578e4c8e9e3f71ade 100644 (file)
@@ -545,12 +545,19 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 
        md->prev = old;
 
-       if (!evlist->overwrite)
-               perf_mmap__write_tail(md, old);
-
        return event;
 }
 
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+       if (!evlist->overwrite) {
+               struct perf_mmap *md = &evlist->mmap[idx];
+               unsigned int old = md->prev;
+
+               perf_mmap__write_tail(md, old);
+       }
+}
+
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 {
        if (evlist->mmap[idx].base != NULL) {
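
perf_evlist__mmap_consume() moves the ring-buffer tail update out of perf_evlist__mmap_read(), so each tool now releases an event explicitly once it is done with it; that is the pattern the earlier hunks in this series apply. A sketch of the resulting loop shape (evlist and idx are assumed to come from the surrounding tool code):

    #include "evlist.h"   /* header path as used inside tools/perf/util */

    static void drain_mmap(struct perf_evlist *evlist, int idx)
    {
            union perf_event *event;

            while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
                    /* ... parse and handle the event here ... */

                    /* Release the slot so the kernel may overwrite it. */
                    perf_evlist__mmap_consume(evlist, idx);
            }
    }
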
index 880d7139d2fb3fce78d75b8e701251827045588e..206d093393069012421a65b6dc893f570de32dd9 100644 (file)
@@ -89,6 +89,8 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
 
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
+
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
 
index 1329b6b6ffe61b0f5fb97d8582a837f554b3c752..ce8dc61ce2c358ddea5851079916292e6838a370 100644 (file)
@@ -5,6 +5,7 @@
 #include <pthread.h>
 #include "callchain.h"
 #include "header.h"
+#include "color.h"
 
 extern struct callchain_param callchain_param;
 
@@ -175,6 +176,18 @@ void perf_hpp__init(void);
 void perf_hpp__column_register(struct perf_hpp_fmt *format);
 void perf_hpp__column_enable(unsigned col);
 
+static inline size_t perf_hpp__use_color(void)
+{
+       return !symbol_conf.field_sep;
+}
+
+static inline size_t perf_hpp__color_overhead(void)
+{
+       return perf_hpp__use_color() ?
+              (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX
+              : 0;
+}
+
 struct perf_evlist;
 
 struct hist_browser_timer {
index 71b5412bbbb9d1197dc5ae999c94704211108238..2ac4bc92bb1ff2b130e47a59e43c3127ba0568b7 100644 (file)
@@ -822,6 +822,8 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
                PyObject *pyevent = pyrf_event__new(event);
                struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
 
+               perf_evlist__mmap_consume(evlist, cpu);
+
                if (pyevent == NULL)
                        return PyErr_NoMemory();
 
index cc75a3cef388065f3164fe270487d2b06d394c72..95d91a0b23afe01a45463a02dcc8eadbb5ba5169 100644 (file)
@@ -56,6 +56,17 @@ static void handler_call_die(const char *handler_name)
        Py_FatalError("problem in Python trace event handler");
 }
 
+/*
+ * Insert val into the dictionary and decrement its reference counter.
+ * This is necessary for dictionaries since PyDict_SetItemString() does not
+ * steal a reference, as opposed to PyTuple_SetItem().
+ */
+static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
+{
+       PyDict_SetItemString(dict, key, val);
+       Py_DECREF(val);
+}
+
 static void define_value(enum print_arg_type field_type,
                         const char *ev_name,
                         const char *field_name,
@@ -279,11 +290,11 @@ static void python_process_tracepoint(union perf_event *perf_event
                PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
                PyTuple_SetItem(t, n++, PyString_FromString(comm));
        } else {
-               PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
-               PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
-               PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
-               PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
-               PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
+               pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu));
+               pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s));
+               pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns));
+               pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid));
+               pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm));
        }
        for (field = event->format.fields; field; field = field->next) {
                if (field->flags & FIELD_IS_STRING) {
@@ -313,7 +324,7 @@ static void python_process_tracepoint(union perf_event *perf_event
                if (handler)
                        PyTuple_SetItem(t, n++, obj);
                else
-                       PyDict_SetItemString(dict, field->name, obj);
+                       pydict_set_item_string_decref(dict, field->name, obj);
 
        }
        if (!handler)
@@ -370,21 +381,21 @@ static void python_process_general_event(union perf_event *perf_event
        if (!handler || !PyCallable_Check(handler))
                goto exit;
 
-       PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
-       PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize(
+       pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
+       pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize(
                        (const char *)&evsel->attr, sizeof(evsel->attr)));
-       PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize(
+       pydict_set_item_string_decref(dict, "sample", PyString_FromStringAndSize(
                        (const char *)sample, sizeof(*sample)));
-       PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize(
+       pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
                        (const char *)sample->raw_data, sample->raw_size));
-       PyDict_SetItemString(dict, "comm",
+       pydict_set_item_string_decref(dict, "comm",
                        PyString_FromString(thread->comm));
        if (al->map) {
-               PyDict_SetItemString(dict, "dso",
+               pydict_set_item_string_decref(dict, "dso",
                        PyString_FromString(al->map->dso->name));
        }
        if (al->sym) {
-               PyDict_SetItemString(dict, "symbol",
+               pydict_set_item_string_decref(dict, "symbol",
                        PyString_FromString(al->sym->name));
        }
 
index a9dd682cf5e3f5117de017156396337a8352914f..1cf9ccb010131d42c3a573079437d87502ac016f 100644 (file)
@@ -3091,7 +3091,7 @@ static const struct file_operations *stat_fops[] = {
 
 static int kvm_init_debug(void)
 {
-       int r = -EFAULT;
+       int r = -EEXIST;
        struct kvm_stats_debugfs_item *p;
 
        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);