git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Oct 2012 16:35:00 +0000 (09:35 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Oct 2012 16:35:00 +0000 (09:35 -0700)
Pull perf fixes from Ingo Molnar:
 "Most of the kernel diffstat relates to a group of Intel P6 and KNC
  (Xeon-Phi Knights Corner) PMU driver fixes, neither of which is in
  heavy use, so we took the fixes.

  The rest is diverse smallish fixes to the tooling and kernel side."

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Remove unused variable in nhmex_rbox_alter_er()
  perf/x86: Enable overflow on Intel KNC with a custom knc_pmu_handle_irq()
  perf/x86: Remove cpuc->enable check on Intel KNC event enable/disable
  perf/x86: Make Intel KNC use full 40-bit width of counters
  perf/x86/uncore: Handle pci_read_config_dword() errors
  perf/x86: Remove P6 cpuc->enabled check
  perf/x86: Update/fix generic events on P6 PMU
  perf/x86: Fix P6 FP_ASSIST event constraint
  perf, cpu hotplug: Use cached value of smp_processor_id()
  perf, cpu hotplug: Run CPU_STARTING notifiers with irqs disabled
  x86/perf: Fix virtualization sanity check
  perf test: Fix exclude_guest parse events tests
  perf tools: do not flush maps on COMM for perf report
  perf help: Fix --help for builtins
  perf trace: Check if sample raw_data field is set
  perf trace: Validate syscall id before growing syscall table

81 files changed:
Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/Makefile
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/flat.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/irq.c
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/smp.c
arch/arm/kernel/smp_twd.c
arch/arm/lib/delay.c
arch/arm/mm/alignment.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/vmregion.h
arch/arm/tools/Makefile
drivers/base/Kconfig
drivers/base/dma-coherent.c
drivers/base/dma-contiguous.c
drivers/dma/Kconfig
drivers/dma/dw_dmac_regs.h
drivers/edac/amd64_edac.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/si.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/tablet/wacom_wac.h
drivers/iommu/amd_iommu_init.c
drivers/iommu/tegra-smmu.c
drivers/pinctrl/core.c
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-tegra.c
drivers/pinctrl/pinctrl-tegra30.c
drivers/rtc/rtc-imxdi.c
drivers/spi/spi-pl022.c
drivers/spi/spi-rspi.c
drivers/video/backlight/Kconfig
drivers/xen/Kconfig
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/compat_ioctl.c
fs/lockd/mon.c
include/drm/drm_pciids.h
include/linux/rbtree_augmented.h
include/linux/spi/tsc2005.h
kernel/Makefile
kernel/cgroup.c
kernel/pid_namespace.c
kernel/workqueue.c
lib/genalloc.c
mm/mmu_notifier.c
mm/page_alloc.c
mm/rmap.c
net/sunrpc/xprtsock.c
security/apparmor/policy.c
security/device_cgroup.c
tools/testing/selftests/epoll/test_epoll.c
tools/vm/page-types.c
usr/gen_init_cpio.c

index c8e578263ce2654251aacde50bc1a55fe514e264..683fde93c4fb1ab9dac7d9472f1ad808202d7cf4 100644 (file)
@@ -93,7 +93,7 @@ Valid values for pin and group names are:
 
     With some exceptions, these support nvidia,high-speed-mode,
     nvidia,schmitt, nvidia,low-power-mode, nvidia,pull-down-strength,
-    nvidia,pull-up-strength, nvidia,slew_rate-rising, nvidia,slew_rate-falling.
+    nvidia,pull-up-strength, nvidia,slew-rate-rising, nvidia,slew-rate-falling.
 
     drive_ao1, drive_ao2, drive_at1, drive_at2, drive_cdev1, drive_cdev2,
     drive_csus, drive_dap1, drive_dap2, drive_dap3, drive_dap4, drive_dbg,
index c275b70349c11aedb4e8db6a1982c57bfbfcb619..6f426ed7009e5e43461770787225b398fe68e67f 100644 (file)
@@ -83,7 +83,7 @@ Valid values for pin and group names are:
   drive groups:
 
     These all support nvidia,pull-down-strength, nvidia,pull-up-strength,
-    nvidia,slew_rate-rising, nvidia,slew_rate-falling. Most but not all
+    nvidia,slew-rate-rising, nvidia,slew-rate-falling. Most but not all
     support nvidia,high-speed-mode, nvidia,schmitt, nvidia,low-power-mode.
 
     ao1, ao2, at1, at2, at3, at4, at5, cdev1, cdev2, cec, crt, csus, dap1,
index 73067efd484530d4021a220fecce329b4752b3c0..ade7e924bef5faaf00de76c0354e025ed4371b35 100644 (file)
@@ -1603,8 +1603,8 @@ config NR_CPUS
        default "4"
 
 config HOTPLUG_CPU
-       bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
-       depends on SMP && HOTPLUG && EXPERIMENTAL
+       bool "Support for hot-pluggable CPUs"
+       depends on SMP && HOTPLUG
        help
          Say Y here to experiment with turning CPUs off and on.  CPUs
          can be controlled through /sys/devices/system/cpu.
@@ -1645,8 +1645,8 @@ config HZ
        default 100
 
 config THUMB2_KERNEL
-       bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
-       depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
+       bool "Compile the kernel in Thumb-2 mode"
+       depends on CPU_V7 && !CPU_V6 && !CPU_V6K
        select AEABI
        select ARM_ASM_UNIFIED
        select ARM_UNWIND
@@ -1850,6 +1850,7 @@ config XEN_DOM0
 config XEN
        bool "Xen guest support on ARM (EXPERIMENTAL)"
        depends on EXPERIMENTAL && ARM && OF
+       depends on CPU_V7 && !CPU_V6
        help
          Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
index f023e3acdfbd8c7c17335cfa9518df05119f4ab7..5f914fca911b77b747dd43569b84a76883abcef7 100644 (file)
@@ -21,8 +21,6 @@ endif
 OBJCOPYFLAGS   :=-O binary -R .comment -S
 GZFLAGS                :=-9
 #KBUILD_CFLAGS +=-pipe
-# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
-KBUILD_CFLAGS  +=$(call cc-option,-marm,)
 
 # Never generate .eh_frame
 KBUILD_CFLAGS  += $(call cc-option,-fno-dwarf2-cfi-asm)
@@ -105,17 +103,20 @@ endif
 ifeq ($(CONFIG_THUMB2_KERNEL),y)
 AFLAGS_AUTOIT  :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
 AFLAGS_NOWARN  :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
-CFLAGS_THUMB2  :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
-AFLAGS_THUMB2  :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
+CFLAGS_ISA     :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
+AFLAGS_ISA     :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
 # Work around buggy relocation from gas if requested:
 ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
 CFLAGS_MODULE  +=-fno-optimize-sibling-calls
 endif
+else
+CFLAGS_ISA     :=$(call cc-option,-marm,)
+AFLAGS_ISA     :=$(CFLAGS_ISA)
 endif
 
 # Need -Uarm for gcc < 3.x
-KBUILD_CFLAGS  +=$(CFLAGS_ABI) $(CFLAGS_THUMB2) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
-KBUILD_AFLAGS  +=$(CFLAGS_ABI) $(AFLAGS_THUMB2) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
+KBUILD_CFLAGS  +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
+KBUILD_AFLAGS  +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
 
 CHECKFLAGS     += -D__arm__
 
index 3fdab016aa5cfdc9132813f4c5be44348fb1c539..f2aa09eb658e632c7703203d97cd3469d7996259 100644 (file)
@@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)
 
 $(obj)/xipImage: vmlinux FORCE
        $(call if_changed,objcopy)
-       @echo '  Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
+       $(kecho) '  Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
 
 $(obj)/Image $(obj)/zImage: FORCE
        @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
@@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE
 
 $(obj)/Image: vmlinux FORCE
        $(call if_changed,objcopy)
-       @echo '  Kernel: $@ is ready'
+       $(kecho) '  Kernel: $@ is ready'
 
 $(obj)/compressed/vmlinux: $(obj)/Image FORCE
        $(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
 $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
        $(call if_changed,objcopy)
-       @echo '  Kernel: $@ is ready'
+       $(kecho) '  Kernel: $@ is ready'
 
 endif
 
@@ -90,7 +90,7 @@ fi
 $(obj)/uImage: $(obj)/zImage FORCE
        @$(check_for_multiple_loadaddr)
        $(call if_changed,uimage)
-       @echo '  Image $@ is ready'
+       $(kecho) '  Image $@ is ready'
 
 $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
        $(Q)$(MAKE) $(build)=$(obj)/bootp $@
@@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
 
 $(obj)/bootpImage: $(obj)/bootp/bootp FORCE
        $(call if_changed,objcopy)
-       @echo '  Kernel: $@ is ready'
+       $(kecho) '  Kernel: $@ is ready'
 
 PHONY += initrd FORCE
 initrd:
index 23004847bb057becd348b5d1e870235b633acedd..78d8e9b5544f59bb04dcc2068a40ea433ae81553 100644 (file)
@@ -91,6 +91,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
  */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
+       debug_dma_mapping_error(dev, dma_addr);
        return dma_addr == DMA_ERROR_CODE;
 }
 
index 59426a4595c9cb90bcb765843f89295bd20f6797..e847d23351eda71ada1bc200476ee7f427a4e8ac 100644 (file)
@@ -8,7 +8,7 @@
 #define        flat_argvp_envp_on_stack()              1
 #define        flat_old_ram_flag(flags)                (flags)
 #define        flat_reloc_valid(reloc, size)           ((reloc) <= (size))
-#define        flat_get_addr_from_rp(rp, relval, flags, persistent) get_unaligned(rp)
+#define        flat_get_addr_from_rp(rp, relval, flags, persistent) ((void)persistent,get_unaligned(rp))
 #define        flat_put_addr_at_rp(rp, val, relval)    put_unaligned(val,rp)
 #define        flat_get_relocate_addr(rel)             (rel)
 #define        flat_set_persistent(relval, p)          0
index 77bd79f2ffdbd0344d096ca7fb2db809fb52d387..7e1f76027f666e252c35bd320d4518e110548c47 100644 (file)
@@ -200,8 +200,8 @@ extern int __put_user_8(void *, unsigned long long);
 #define USER_DS                        KERNEL_DS
 
 #define segment_eq(a,b)                (1)
-#define __addr_ok(addr)                (1)
-#define __range_ok(addr,size)  (0)
+#define __addr_ok(addr)                ((void)(addr),1)
+#define __range_ok(addr,size)  ((void)(addr),0)
 #define get_fs()               (KERNEL_DS)
 
 static inline void set_fs(mm_segment_t fs)
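
The flat.h and uaccess.h hunks above both switch fixed-result macros to the comma-operator form: each macro still expands to the same constant, but its arguments are now evaluated through a (void) cast, which keeps "set but not used" compiler warnings away at the call sites. A minimal standalone sketch of that idiom follows; it is plain C written for illustration, not code from the kernel tree.

    /* Illustrative only: a macro whose result is fixed can still "use"
     * its arguments via the comma operator, silencing unused warnings. */
    #include <stdio.h>

    #define range_ok(addr, size)   ((void)(addr), (void)(size), 0)

    int main(void)
    {
            char buf[16];

            /* buf and sizeof(buf) are referenced inside the macro, yet
             * the whole expression still evaluates to the constant 0. */
            printf("range_ok -> %d\n", range_ok(buf, sizeof(buf)));
            return 0;
    }
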
index 16cedb42c0c39c0169008e45488b279cd9b32821..896165096d6a936572bdcfd0d21cc23f01cb9ea2 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
+#include <linux/export.h>
 
 #include <asm/exception.h>
 #include <asm/mach/arch.h>
@@ -109,6 +110,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
        /* Order is clear bits in "clr" then set bits in "set" */
        irq_modify_status(irq, clr, set & ~clr);
 }
+EXPORT_SYMBOL_GPL(set_irq_flags);
 
 void __init init_IRQ(void)
 {
index 38c1a3b103a0684b5b579bb74ca07f38bb91eb13..83931290506704ad226e8a8cb2a15b52a33d4494 100644 (file)
@@ -366,7 +366,9 @@ void kprobe_arm_test_cases(void)
        TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")
        TEST_UNSUPPORTED(".word 0xe0500090 @ undef")
        TEST_UNSUPPORTED(".word 0xe05fff9f @ undef")
+#endif
 
+#if __LINUX_ARM_ARCH__ >= 7
        TEST_RRR(  "mls         r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(  "mlshi       r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(   "mls         lr, r",1, VAL2,", r",2, VAL3,", r13")
@@ -456,6 +458,8 @@ void kprobe_arm_test_cases(void)
        TEST_UNSUPPORTED(".word 0xe1700090") /* Unallocated space */
 #if __LINUX_ARM_ARCH__ >= 6
        TEST_UNSUPPORTED("ldrex r2, [sp]")
+#endif
+#if (__LINUX_ARM_ARCH__ >= 7) || defined(CONFIG_CPU_32v6K)
        TEST_UNSUPPORTED("strexd        r0, r2, r3, [sp]")
        TEST_UNSUPPORTED("ldrexd        r2, r3, [sp]")
        TEST_UNSUPPORTED("strexb        r0, r2, [sp]")
index e29c3337ca8142aeb6f4bb755e3947d6aecd4fbe..8ef8c9337809cbe5ccb91b33525c38a379e9e986 100644 (file)
@@ -45,10 +45,9 @@ int machine_kexec_prepare(struct kimage *image)
        for (i = 0; i < image->nr_segments; i++) {
                current_segment = &image->segment[i];
 
-               err = memblock_is_region_memory(current_segment->mem,
-                                               current_segment->memsz);
-               if (err)
-                       return - EINVAL;
+               if (!memblock_is_region_memory(current_segment->mem,
+                                              current_segment->memsz))
+                       return -EINVAL;
 
                err = get_user(header, (__be32*)current_segment->buf);
                if (err)
index 93971b1a4f0bb0d38eebd573f8618e994273b079..53c0304b734a4e6ad7b1806b147baa3d2d43986f 100644 (file)
@@ -96,6 +96,10 @@ armpmu_event_set_period(struct perf_event *event,
        s64 period = hwc->sample_period;
        int ret = 0;
 
+       /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
+       if (unlikely(period != hwc->last_period))
+               left = period - (hwc->last_period - left);
+
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
index 8e20754dd31d5946f5297aae2023ae2c8d5cd572..fbc8b2623d82f08c8c2926375a7b8a31f943361b 100644 (file)
@@ -294,18 +294,24 @@ static void percpu_timer_setup(void);
 asmlinkage void __cpuinit secondary_start_kernel(void)
 {
        struct mm_struct *mm = &init_mm;
-       unsigned int cpu = smp_processor_id();
+       unsigned int cpu;
+
+       /*
+        * The identity mapping is uncached (strongly ordered), so
+        * switch away from it before attempting any exclusive accesses.
+        */
+       cpu_switch_mm(mm->pgd, mm);
+       enter_lazy_tlb(mm, current);
+       local_flush_tlb_all();
 
        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
+       cpu = smp_processor_id();
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
-       cpu_switch_mm(mm->pgd, mm);
-       enter_lazy_tlb(mm, current);
-       local_flush_tlb_all();
 
        printk("CPU%u: Booted secondary processor\n", cpu);
 
index e1f906989bb8161963d8a714073a8d84a8e2c2e6..b22d700fea2751c60b5dbad689ee0b18305108b3 100644 (file)
@@ -42,10 +42,10 @@ static void twd_set_mode(enum clock_event_mode mode,
 
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
-               /* timer load already set up */
                ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
                        | TWD_TIMER_CONTROL_PERIODIC;
-               __raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
+               __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+                       twd_base + TWD_TIMER_LOAD);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                /* period set, and timer enabled in 'next_event' hook */
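
The smp_twd.c hunk above programs the periodic reload with DIV_ROUND_CLOSEST() instead of a truncating division. The sketch below shows the numerical difference; it uses a simplified, unsigned-only form of the macro and a made-up clock rate, purely for illustration.

    /* Illustrative only: simplified DIV_ROUND_CLOSEST() for unsigned
     * operands; 249999999 Hz is an assumed example rate, not a real one. */
    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d)   (((x) + ((d) / 2)) / (d))

    int main(void)
    {
            unsigned long rate = 249999999;   /* hypothetical timer clock */
            unsigned long hz = 100;           /* ticks per second */

            printf("truncated reload: %lu\n", rate / hz);                    /* 2499999 */
            printf("closest reload:   %lu\n", DIV_ROUND_CLOSEST(rate, hz));  /* 2500000 */
            return 0;
    }

Truncation leaves the reload 99 cycles short every tick in this example; rounding to the closest value keeps the error within half a cycle.
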
index 9d0a30032d7f0f2ae50f0cbe9eebb80d6da827a3..0dc53854a5d8eb423e468fe255cb0f7c1fa30e15 100644 (file)
@@ -45,6 +45,7 @@ int read_current_timer(unsigned long *timer_val)
        *timer_val = delay_timer->read_current_timer();
        return 0;
 }
+EXPORT_SYMBOL_GPL(read_current_timer);
 
 static void __timer_delay(unsigned long cycles)
 {
index b9f60ebe3bc4f13f3dc13e4e4531da549890eca4..023f443784ec0b1fd438e7a9637702ef1e58fbaf 100644 (file)
@@ -856,8 +856,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                if (thumb2_32b) {
                        offset.un = 0;
                        handler = do_alignment_t32_to_handler(&instr, regs, &offset);
-               } else
+               } else {
+                       offset.un = 0;
                        handler = do_alignment_ldmstm;
+               }
                break;
 
        default:
index 477a2d23ddf17efb95af55cebbc25d35793a417d..58bc3e4d3bd0a76094d8a2a40c0be9bece4eaf7a 100644 (file)
@@ -610,7 +610,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                         gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
 {
        u64 mask = get_coherent_dma_mask(dev);
-       struct page *page;
+       struct page *page = NULL;
        void *addr;
 
 #ifdef CONFIG_DMA_API_DEBUG
index bf312c354a214761646a3a3738fb6080b84fff33..0f5a5f2a2c7bbc1bb79668ac291dde283741be7a 100644 (file)
@@ -17,7 +17,6 @@ struct arm_vmregion {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
-       void                    *priv;
        int                     vm_active;
        const void              *caller;
 };
index 635cb1865e4d56af08b8579eb72dbaadfa77f211..cd60a81163e938a78b55b40fa9df6009408f5b57 100644 (file)
@@ -5,6 +5,6 @@
 #
 
 include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
-       @echo '  Generating $@'
+       $(kecho) '  Generating $@'
        @mkdir -p $(dir $@)
        $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
index 08b4c520938463fd989bba879d791a3e418efd39..b34b5cda5ae110aeec980b6ec7c2ec608490a093 100644 (file)
@@ -236,7 +236,7 @@ config CMA_SIZE_PERCENTAGE
 
 choice
        prompt "Selected region size"
-       default CMA_SIZE_SEL_ABSOLUTE
+       default CMA_SIZE_SEL_MBYTES
 
 config CMA_SIZE_SEL_MBYTES
        bool "Use mega bytes value only"
index 560a7173f810015a9532d25395a8ad1a57366e6c..bc256b64102710039e208ec82c513e97b223a290 100644 (file)
@@ -191,9 +191,8 @@ EXPORT_SYMBOL(dma_release_from_coherent);
  * This checks whether the memory was allocated from the per-device
  * coherent memory pool and if so, maps that memory to the provided vma.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if
- * dma_release_coherent() should proceed with mapping memory from
- * generic pools.
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
  */
 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
index 9a1469474f55addaea1ae5be1f4499187297db06..612afcc5a938f45549ed877ebd6c8ac697315fe3 100644 (file)
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/page-isolation.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
 
-#ifndef SZ_1M
-#define SZ_1M (1 << 20)
-#endif
-
 struct cma {
        unsigned long   base_pfn;
        unsigned long   count;
index 677cd6e4e1a1c1afb387217fdc9f0f363c3149cf..d4c12180c65416043dbbf3940374ad146daad881 100644 (file)
@@ -90,6 +90,17 @@ config DW_DMAC
          Support the Synopsys DesignWare AHB DMA controller.  This
          can be integrated in chips such as the Atmel AT32ap7000.
 
+config DW_DMAC_BIG_ENDIAN_IO
+       bool "Use big endian I/O register access"
+       default y if AVR32
+       depends on DW_DMAC
+       help
+         Say yes here to use big endian I/O access when reading and writing
+         to the DMA controller registers. This is needed on some platforms,
+         like the Atmel AVR32 architecture.
+
+         If unsure, use the default setting.
+
 config AT_HDMAC
        tristate "Atmel AHB DMA support"
        depends on ARCH_AT91
index ff39fa6cd2bc443a7d96fd9d85be015f643704f5..88965597b7d08c9e2b9987957f85284cc9851f4c 100644 (file)
@@ -98,9 +98,17 @@ struct dw_dma_regs {
        u32     DW_PARAMS;
 };
 
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+#define dma_readl_native ioread32be
+#define dma_writel_native iowrite32be
+#else
+#define dma_readl_native readl
+#define dma_writel_native writel
+#endif
+
 /* To access the registers in early stage of probe */
 #define dma_read_byaddr(addr, name) \
-       readl((addr) + offsetof(struct dw_dma_regs, name))
+       dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
 
 /* Bitfields in DW_PARAMS */
 #define DW_PARAMS_NR_CHAN      8               /* number of channels */
@@ -216,9 +224,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
 }
 
 #define channel_readl(dwc, name) \
-       readl(&(__dwc_regs(dwc)->name))
+       dma_readl_native(&(__dwc_regs(dwc)->name))
 #define channel_writel(dwc, name, val) \
-       writel((val), &(__dwc_regs(dwc)->name))
+       dma_writel_native((val), &(__dwc_regs(dwc)->name))
 
 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 {
@@ -246,9 +254,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
 }
 
 #define dma_readl(dw, name) \
-       readl(&(__dw_regs(dw)->name))
+       dma_readl_native(&(__dw_regs(dw)->name))
 #define dma_writel(dw, name, val) \
-       writel((val), &(__dw_regs(dw)->name))
+       dma_writel_native((val), &(__dw_regs(dw)->name))
 
 #define channel_set_bit(dw, reg, mask) \
        dma_writel(dw, reg, ((mask) << 8) | (mask))
index 5a297a26211d622b0f0ceedb0389fcbeb4223baf..cc8e7c78a23ca22972bfa570dc88d1fcf449e979 100644 (file)
@@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
         * memory controller and apply to register. Search for the first
         * bandwidth entry that is greater or equal than the setting requested
         * and program that. If at last entry, turn off DRAM scrubbing.
+        *
+        * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
+        * by falling back to the last element in scrubrates[].
         */
-       for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+       for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
                /*
                 * skip scrub rates which aren't recommended
                 * (see F10 BKDG, F3x58)
@@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 
                if (scrubrates[i].bandwidth <= new_bw)
                        break;
-
-               /*
-                * if no suitable bandwidth found, turn off DRAM scrubbing
-                * entirely by falling back to the last element in the
-                * scrubrates array.
-                */
        }
 
        scrubval = scrubrates[i].scrubval;
index 49cbb3795a102e755337a3c2105f03caedc0f2ee..ba498f8e47a211c7bc9a8e749624be6cbd8b395b 100644 (file)
@@ -184,6 +184,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        struct radeon_backlight_privdata *pdata;
        struct radeon_encoder_atom_dig *dig;
        u8 backlight_level;
+       char bl_name[16];
 
        if (!radeon_encoder->enc_priv)
                return;
@@ -203,7 +204,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        memset(&props, 0, sizeof(props));
        props.max_brightness = RADEON_MAX_BL_LEVEL;
        props.type = BACKLIGHT_RAW;
-       bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+       snprintf(bl_name, sizeof(bl_name),
+                "radeon_bl%d", dev->primary->index);
+       bd = backlight_device_register(bl_name, &drm_connector->kdev,
                                       pdata, &radeon_atom_backlight_ops, &props);
        if (IS_ERR(bd)) {
                DRM_ERROR("Backlight registration failed\n");
index 573ed1bc6cf7d36ffd1a3385cc05350fd7a3d3a0..30271b641913f3eeee72dcdc7db7146217c86109 100644 (file)
@@ -2829,6 +2829,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
        case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
                return true;
        default:
+               DRM_ERROR("Invalid register 0x%x in CS\n", reg);
                return false;
        }
 }
index 8c74c729586db21185e54465d23289a15bc4d979..81e6a568c29debcf49bb915c5d3aa19bcc88324e 100644 (file)
@@ -1538,26 +1538,31 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
        struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-       int i;
 
-       radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
-       radeon_ring_write(ring, pe);
-       radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
-       for (i = 0; i < count; ++i) {
-               uint64_t value = 0;
-               if (flags & RADEON_VM_PAGE_SYSTEM) {
-                       value = radeon_vm_map_gart(rdev, addr);
-                       value &= 0xFFFFFFFFFFFFF000ULL;
-                       addr += incr;
-
-               } else if (flags & RADEON_VM_PAGE_VALID) {
-                       value = addr;
-                       addr += incr;
-               }
+       while (count) {
+               unsigned ndw = 1 + count * 2;
+               if (ndw > 0x3FFF)
+                       ndw = 0x3FFF;
+
+               radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+               radeon_ring_write(ring, pe);
+               radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+               for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+                       uint64_t value = 0;
+                       if (flags & RADEON_VM_PAGE_SYSTEM) {
+                               value = radeon_vm_map_gart(rdev, addr);
+                               value &= 0xFFFFFFFFFFFFF000ULL;
+                               addr += incr;
+
+                       } else if (flags & RADEON_VM_PAGE_VALID) {
+                               value = addr;
+                               addr += incr;
+                       }
 
-               value |= r600_flags;
-               radeon_ring_write(ring, value);
-               radeon_ring_write(ring, upper_32_bits(value));
+                       value |= r600_flags;
+                       radeon_ring_write(ring, value);
+                       radeon_ring_write(ring, upper_32_bits(value));
+               }
        }
 }
 
@@ -1586,4 +1591,8 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        /* bits 0-7 are the VM contexts0-7 */
        radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
        radeon_ring_write(ring, 1 << vm->id);
+
+       /* sync PFP to ME, otherwise we might get invalid PFP reads */
+       radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+       radeon_ring_write(ring, 0x0);
 }
index 2423d1b5d385938f2445706abc97ab74198e1b0f..cbef6815907a13c83efff8b9e863568b1e625980 100644 (file)
 #define        PACKET3_MPEG_INDEX                              0x3A
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
+#define        PACKET3_PFP_SYNC_ME                             0x42
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
index 1aa3f910b99328e9f3de367859742bb57722732b..37f6a907aea49c7b1ff3c57628cb281d737d2e69 100644 (file)
@@ -87,7 +87,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
                atpx_arg_elements[1].integer.value = 0;
        }
 
-       status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
+       status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
 
        /* Fail only if calling the method fails and ATPX is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -373,11 +373,11 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 }
 
 /**
- * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles
+ * radeon_atpx_pci_probe_handle - look up the ATPX handle
  *
  * @pdev: pci device
  *
- * Look up the ATPX and ATRM handles (all asics).
+ * Look up the ATPX handles (all asics).
  * Returns true if the handles are found, false if not.
  */
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
index bd13ca09eb626efcc2c312ef68569ffcf7655db9..e2f5f888c374cc29b2f8658a5e0ba9d91325827d 100644 (file)
@@ -355,6 +355,8 @@ int radeon_wb_init(struct radeon_device *rdev)
  */
 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
 {
+       uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
        mc->vram_start = base;
        if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
@@ -368,8 +370,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-       if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
-               mc->real_vram_size = radeon_vram_limit;
+       if (limit && limit < mc->real_vram_size)
+               mc->real_vram_size = limit;
        dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
                        mc->mc_vram_size >> 20, mc->vram_start,
                        mc->vram_end, mc->real_vram_size >> 20);
@@ -834,6 +836,19 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
+/**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
+       return (arg & (arg - 1)) == 0;
+}
+
 /**
  * radeon_check_arguments - validate module params
  *
@@ -845,52 +860,25 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 static void radeon_check_arguments(struct radeon_device *rdev)
 {
        /* vramlimit must be a power of two */
-       switch (radeon_vram_limit) {
-       case 0:
-       case 4:
-       case 8:
-       case 16:
-       case 32:
-       case 64:
-       case 128:
-       case 256:
-       case 512:
-       case 1024:
-       case 2048:
-       case 4096:
-               break;
-       default:
+       if (!radeon_check_pot_argument(radeon_vram_limit)) {
                dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
                                radeon_vram_limit);
                radeon_vram_limit = 0;
-               break;
        }
-       radeon_vram_limit = radeon_vram_limit << 20;
+
        /* gtt size must be power of two and greater or equal to 32M */
-       switch (radeon_gart_size) {
-       case 4:
-       case 8:
-       case 16:
+       if (radeon_gart_size < 32) {
                dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
                                radeon_gart_size);
                radeon_gart_size = 512;
-               break;
-       case 32:
-       case 64:
-       case 128:
-       case 256:
-       case 512:
-       case 1024:
-       case 2048:
-       case 4096:
-               break;
-       default:
+
+       } else if (!radeon_check_pot_argument(radeon_gart_size)) {
                dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
                                radeon_gart_size);
                radeon_gart_size = 512;
-               break;
        }
-       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+       rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
        /* AGP mode can only be -1, 1, 2, 4, 8 */
        switch (radeon_agpmode) {
        case -1:
index a7677dd1ce98573121a395643b31a57480e60c76..4debd60e5aa63390f0f386ee236bfc42fe4f8cf3 100644 (file)
@@ -355,14 +355,13 @@ int radeon_gart_init(struct radeon_device *rdev)
        DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
                 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
        /* Allocate pages table */
-       rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
-                                  GFP_KERNEL);
+       rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
        if (rdev->gart.pages == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
-       rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
-                                       rdev->gart.num_cpu_pages, GFP_KERNEL);
+       rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+                                       rdev->gart.num_cpu_pages);
        if (rdev->gart.pages_addr == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
@@ -388,8 +387,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
                radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
        }
        rdev->gart.ready = false;
-       kfree(rdev->gart.pages);
-       kfree(rdev->gart.pages_addr);
+       vfree(rdev->gart.pages);
+       vfree(rdev->gart.pages_addr);
        rdev->gart.pages = NULL;
        rdev->gart.pages_addr = NULL;
 
@@ -577,7 +576,7 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
  *
  * Global and local mutex must be locked!
  */
-int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
 {
        struct radeon_vm *vm_evict;
 
@@ -1036,8 +1035,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
                pte += (addr & mask) * 8;
 
-               if (((last_pte + 8 * count) != pte) ||
-                   ((count + nptes) > 1 << 11)) {
+               if ((last_pte + 8 * count) != pte) {
 
                        if (count) {
                                radeon_asic_vm_set_page(rdev, last_pte,
@@ -1148,17 +1146,17 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 
        if (RADEON_VM_BLOCK_SIZE > 11)
                /* reserve space for one header for every 2k dwords */
-               ndw += (nptes >> 11) * 3;
+               ndw += (nptes >> 11) * 4;
        else
                /* reserve space for one header for
                    every (1 << BLOCK_SIZE) entries */
-               ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
+               ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
 
        /* reserve space for pte addresses */
        ndw += nptes * 2;
 
        /* reserve space for one header for every 2k dwords */
-       ndw += (npdes >> 11) * 3;
+       ndw += (npdes >> 11) * 4;
 
        /* reserve space for pde addresses */
        ndw += npdes * 2;
index f38fbcc469358a2144791d40f910d4d900d99202..fe5c1f6b795795530075939d39c06486ce42d29d 100644 (file)
@@ -53,6 +53,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
                                struct drm_gem_object **obj)
 {
        struct radeon_bo *robj;
+       unsigned long max_size;
        int r;
 
        *obj = NULL;
@@ -60,11 +61,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }
+
+       /* maximun bo size is the minimun btw visible vram and gtt size */
+       max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+       if (size > max_size) {
+               printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+                      __func__, __LINE__, size >> 20, max_size >> 20);
+               return -ENOMEM;
+       }
+
+retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
        if (r) {
-               if (r != -ERESTARTSYS)
+               if (r != -ERESTARTSYS) {
+                       if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+                               initial_domain |= RADEON_GEM_DOMAIN_GTT;
+                               goto retry;
+                       }
                        DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
+               }
                return r;
        }
        *obj = &robj->gem_base;
index a13ad9d707cfcccdab04d085a62d4be0ab3690df..0063df9d166d70f5267003d24bc078093f2fdf9a 100644 (file)
@@ -370,6 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
        struct backlight_properties props;
        struct radeon_backlight_privdata *pdata;
        uint8_t backlight_level;
+       char bl_name[16];
 
        if (!radeon_encoder->enc_priv)
                return;
@@ -389,7 +390,9 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
        memset(&props, 0, sizeof(props));
        props.max_brightness = RADEON_MAX_BL_LEVEL;
        props.type = BACKLIGHT_RAW;
-       bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+       snprintf(bl_name, sizeof(bl_name),
+                "radeon_bl%d", dev->primary->index);
+       bd = backlight_device_register(bl_name, &drm_connector->kdev,
                                       pdata, &radeon_backlight_ops, &props);
        if (IS_ERR(bd)) {
                DRM_ERROR("Backlight registration failed\n");
index 8b27dd6e3144566bf9cfcb218f4697cd92dbbb95..b91118ccef867f5b59effd78d0ee93d65a932f8b 100644 (file)
@@ -105,7 +105,6 @@ int radeon_bo_create(struct radeon_device *rdev,
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
-       unsigned long max_size = 0;
        size_t acc_size;
        int r;
 
@@ -121,18 +120,9 @@ int radeon_bo_create(struct radeon_device *rdev,
        }
        *bo_ptr = NULL;
 
-       /* maximun bo size is the minimun btw visible vram and gtt size */
-       max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
-       if ((page_align << PAGE_SHIFT) >= max_size) {
-               printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
-                       __func__, __LINE__, page_align  >> (20 - PAGE_SHIFT), max_size >> 20);
-               return -ENOMEM;
-       }
-
        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));
 
-retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
@@ -154,15 +144,6 @@ retry:
                        acc_size, sg, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
-               if (r != -ERESTARTSYS) {
-                       if (domain == RADEON_GEM_DOMAIN_VRAM) {
-                               domain |= RADEON_GEM_DOMAIN_GTT;
-                               goto retry;
-                       }
-                       dev_err(rdev->dev,
-                               "object_init failed for (%lu, 0x%08X)\n",
-                               size, domain);
-               }
                return r;
        }
        *bo_ptr = bo;
index df8dd77016436373dcfcfce4e3fb3e8410a3d873..b0db712060fb3876dfc8f6b92c93acb04bee751a 100644 (file)
@@ -2808,26 +2808,31 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
        struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-       int i;
-       uint64_t value;
 
-       radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                WRITE_DATA_DST_SEL(1)));
-       radeon_ring_write(ring, pe);
-       radeon_ring_write(ring, upper_32_bits(pe));
-       for (i = 0; i < count; ++i) {
-               if (flags & RADEON_VM_PAGE_SYSTEM) {
-                       value = radeon_vm_map_gart(rdev, addr);
-                       value &= 0xFFFFFFFFFFFFF000ULL;
-               } else if (flags & RADEON_VM_PAGE_VALID)
-                       value = addr;
-               else
-                       value = 0;
-               addr += incr;
-               value |= r600_flags;
-               radeon_ring_write(ring, value);
-               radeon_ring_write(ring, upper_32_bits(value));
+       while (count) {
+               unsigned ndw = 2 + count * 2;
+               if (ndw > 0x3FFE)
+                       ndw = 0x3FFE;
+
+               radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+               radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                        WRITE_DATA_DST_SEL(1)));
+               radeon_ring_write(ring, pe);
+               radeon_ring_write(ring, upper_32_bits(pe));
+               for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+                       uint64_t value;
+                       if (flags & RADEON_VM_PAGE_SYSTEM) {
+                               value = radeon_vm_map_gart(rdev, addr);
+                               value &= 0xFFFFFFFFFFFFF000ULL;
+                       } else if (flags & RADEON_VM_PAGE_VALID)
+                               value = addr;
+                       else
+                               value = 0;
+                       addr += incr;
+                       value |= r600_flags;
+                       radeon_ring_write(ring, value);
+                       radeon_ring_write(ring, upper_32_bits(value));
+               }
        }
 }
 
@@ -2868,6 +2873,10 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 1 << vm->id);
+
+       /* sync PFP to ME, otherwise we might get invalid PFP reads */
+       radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+       radeon_ring_write(ring, 0x0);
 }
 
 /*
index 9edf9806cff9db6350e4e25677fb74177364251a..2c1e12bf2ab424e87f49ed14e57e9b9125aa9831 100644 (file)
@@ -391,7 +391,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
                                                        features->pktlen = WACOM_PKGLEN_TPC2FG;
                                                }
 
-                                               if (features->type == MTSCREEN)
+                                               if (features->type == MTSCREEN || features->type == WACOM_24HDT)
                                                        features->pktlen = WACOM_PKGLEN_MTOUCH;
 
                                                if (features->type == BAMBOO_PT) {
@@ -402,6 +402,14 @@ static int wacom_parse_hid(struct usb_interface *intf,
                                                        features->x_max =
                                                                get_unaligned_le16(&report[i + 8]);
                                                        i += 15;
+                                               } else if (features->type == WACOM_24HDT) {
+                                                       features->x_max =
+                                                               get_unaligned_le16(&report[i + 3]);
+                                                       features->x_phy =
+                                                               get_unaligned_le16(&report[i + 8]);
+                                                       features->unit = report[i - 1];
+                                                       features->unitExpo = report[i - 3];
+                                                       i += 12;
                                                } else {
                                                        features->x_max =
                                                                get_unaligned_le16(&report[i + 3]);
@@ -434,6 +442,12 @@ static int wacom_parse_hid(struct usb_interface *intf,
                                                        features->y_phy =
                                                                get_unaligned_le16(&report[i + 6]);
                                                        i += 7;
+                                               } else if (type == WACOM_24HDT) {
+                                                       features->y_max =
+                                                               get_unaligned_le16(&report[i + 3]);
+                                                       features->y_phy =
+                                                               get_unaligned_le16(&report[i - 2]);
+                                                       i += 7;
                                                } else if (type == BAMBOO_PT) {
                                                        features->y_phy =
                                                                get_unaligned_le16(&report[i + 3]);
@@ -541,6 +555,9 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
                        /* MT Tablet PC touch */
                        return wacom_set_device_mode(intf, 3, 4, 4);
                }
+               else if (features->type == WACOM_24HDT) {
+                       return wacom_set_device_mode(intf, 18, 3, 2);
+               }
        } else if (features->device_type == BTN_TOOL_PEN) {
                if (features->type <= BAMBOO_PT && features->type != WIRELESS) {
                        return wacom_set_device_mode(intf, 2, 2, 2);
@@ -613,6 +630,30 @@ struct wacom_usbdev_data {
 static LIST_HEAD(wacom_udev_list);
 static DEFINE_MUTEX(wacom_udev_list_lock);
 
+static struct usb_device *wacom_get_sibling(struct usb_device *dev, int vendor, int product)
+{
+       int port1;
+       struct usb_device *sibling;
+
+       if (vendor == 0 && product == 0)
+               return dev;
+
+       if (dev->parent == NULL)
+               return NULL;
+
+       usb_hub_for_each_child(dev->parent, port1, sibling) {
+               struct usb_device_descriptor *d;
+               if (sibling == NULL)
+                       continue;
+
+               d = &sibling->descriptor;
+               if (d->idVendor == vendor && d->idProduct == product)
+                       return sibling;
+       }
+
+       return NULL;
+}
+
 static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev)
 {
        struct wacom_usbdev_data *data;
@@ -1257,13 +1298,19 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
        strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
 
        if (features->quirks & WACOM_QUIRK_MULTI_INPUT) {
+               struct usb_device *other_dev;
+
                /* Append the device type to the name */
                strlcat(wacom_wac->name,
                        features->device_type == BTN_TOOL_PEN ?
                                " Pen" : " Finger",
                        sizeof(wacom_wac->name));
 
-               error = wacom_add_shared_data(wacom_wac, dev);
+
+               other_dev = wacom_get_sibling(dev, features->oVid, features->oPid);
+               if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL)
+                       other_dev = dev;
+               error = wacom_add_shared_data(wacom_wac, other_dev);
                if (error)
                        goto fail3;
        }
index c3468c8dbd891865b0fd2b4a8159f499ea8df1f1..aa6010131179588bb75ea3326843df499db7aaf1 100644 (file)
@@ -806,6 +806,70 @@ static int find_slot_from_contactid(struct wacom_wac *wacom, int contactid)
        return -1;
 }
 
+static int int_dist(int x1, int y1, int x2, int y2)
+{
+       int x = x2 - x1;
+       int y = y2 - y1;
+
+       return int_sqrt(x*x + y*y);
+}
+
+static int wacom_24hdt_irq(struct wacom_wac *wacom)
+{
+       struct input_dev *input = wacom->input;
+       char *data = wacom->data;
+       int i;
+       int current_num_contacts = data[61];
+       int contacts_to_send = 0;
+
+       /*
+        * First packet resets the counter since only the first
+        * packet in series will have non-zero current_num_contacts.
+        */
+       if (current_num_contacts)
+               wacom->num_contacts_left = current_num_contacts;
+
+       /* There are at most 4 contacts per packet */
+       contacts_to_send = min(4, wacom->num_contacts_left);
+
+       for (i = 0; i < contacts_to_send; i++) {
+               int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1;
+               bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity;
+               int id = data[offset + 1];
+               int slot = find_slot_from_contactid(wacom, id);
+
+               if (slot < 0)
+                       continue;
+               input_mt_slot(input, slot);
+               input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+
+               if (touch) {
+                       int t_x = le16_to_cpup((__le16 *)&data[offset + 2]);
+                       int c_x = le16_to_cpup((__le16 *)&data[offset + 4]);
+                       int t_y = le16_to_cpup((__le16 *)&data[offset + 6]);
+                       int c_y = le16_to_cpup((__le16 *)&data[offset + 8]);
+                       int w = le16_to_cpup((__le16 *)&data[offset + 10]);
+                       int h = le16_to_cpup((__le16 *)&data[offset + 12]);
+
+                       input_report_abs(input, ABS_MT_POSITION_X, t_x);
+                       input_report_abs(input, ABS_MT_POSITION_Y, t_y);
+                       input_report_abs(input, ABS_MT_TOUCH_MAJOR, min(w,h));
+                       input_report_abs(input, ABS_MT_WIDTH_MAJOR, min(w, h) + int_dist(t_x, t_y, c_x, c_y));
+                       input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
+                       input_report_abs(input, ABS_MT_ORIENTATION, w > h);
+               }
+               wacom->slots[slot] = touch ? id : -1;
+       }
+
+       input_mt_report_pointer_emulation(input, true);
+
+       wacom->num_contacts_left -= contacts_to_send;
+       if (wacom->num_contacts_left <= 0)
+               wacom->num_contacts_left = 0;
+
+       return 1;
+}
+
 static int wacom_mt_touch(struct wacom_wac *wacom)
 {
        struct input_dev *input = wacom->input;
@@ -1255,6 +1319,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
                sync = wacom_intuos_irq(wacom_wac);
                break;
 
+       case WACOM_24HDT:
+               sync = wacom_24hdt_irq(wacom_wac);
+               break;
+
        case INTUOS5S:
        case INTUOS5:
        case INTUOS5L:
@@ -1340,7 +1408,8 @@ void wacom_setup_device_quirks(struct wacom_features *features)
 
        /* these device have multiple inputs */
        if (features->type >= WIRELESS ||
-           (features->type >= INTUOS5S && features->type <= INTUOS5L))
+           (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
+           (features->oVid && features->oPid))
                features->quirks |= WACOM_QUIRK_MULTI_INPUT;
 
        /* quirk for bamboo touch with 2 low res touches */
@@ -1575,6 +1644,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                break;
 
+       case WACOM_24HDT:
+               if (features->device_type == BTN_TOOL_FINGER) {
+                       input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0);
+                       input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0);
+                       input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0);
+                       input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+               }
+               /* fall through */
+
        case MTSCREEN:
                if (features->device_type == BTN_TOOL_FINGER) {
                        wacom_wac->slots = kmalloc(features->touch_max *
@@ -1869,8 +1947,11 @@ static const struct wacom_features wacom_features_0xF4 =
        { "Wacom Cintiq 24HD",       WACOM_PKGLEN_INTUOS,   104480, 65600, 2047,
          63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xF8 =
-       { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS,   104480, 65600, 2047,
-         63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+       { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS,   104480, 65600, 2047, /* Pen */
+         63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
+static const struct wacom_features wacom_features_0xF6 =
+       { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
+         .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 };
 static const struct wacom_features wacom_features_0x3F =
        { "Wacom Cintiq 21UX",    WACOM_PKGLEN_INTUOS,    87200, 65600, 1023,
          63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2113,6 +2194,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_WACOM(0xF4) },
        { USB_DEVICE_WACOM(0xF8) },
+       { USB_DEVICE_WACOM(0xF6) },
        { USB_DEVICE_WACOM(0xFA) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
index 96c185cc301eb95bf062440e59c49114458c0028..345f1e76975ef76ef6b251ae772a7c2b807c21ff 100644 (file)
@@ -29,6 +29,7 @@
 
 /* wacom data size per MT contact */
 #define WACOM_BYTES_PER_MT_PACKET      11
+#define WACOM_BYTES_PER_24HDT_PACKET   14
 
 /* device IDs */
 #define STYLUS_DEVICE_ID       0x02
@@ -49,6 +50,7 @@
 #define WACOM_REPORT_TPCHID            15
 #define WACOM_REPORT_TPCST             16
 #define WACOM_REPORT_TPC1FGE           18
+#define WACOM_REPORT_24HDT             1
 
 /* device quirks */
 #define WACOM_QUIRK_MULTI_INPUT                0x0001
@@ -81,6 +83,7 @@ enum {
        WACOM_MO,
        WIRELESS,
        BAMBOO_PT,
+       WACOM_24HDT,
        TABLETPC,   /* add new TPC below */
        TABLETPCE,
        TABLETPC2FG,
@@ -109,6 +112,8 @@ struct wacom_features {
        int distance_fuzz;
        unsigned quirks;
        unsigned touch_max;
+       int oVid;
+       int oPid;
 };
 
 struct wacom_shared {
index 18b0d99bd4d6686b3727f83896769d0a116dcf1f..81837b0710a9ba0240b1eba3c1b3f8b51d997e2f 100644 (file)
@@ -1599,21 +1599,46 @@ static void __init free_on_init_error(void)
 #endif
 }
 
+/* SB IOAPIC is always on this device in AMD systems */
+#define IOAPIC_SB_DEVID                ((0x00 << 8) | PCI_DEVFN(0x14, 0))
+
 static bool __init check_ioapic_information(void)
 {
+       bool ret, has_sb_ioapic;
        int idx;
 
-       for (idx = 0; idx < nr_ioapics; idx++) {
-               int id = mpc_ioapic_id(idx);
+       has_sb_ioapic = false;
+       ret           = false;
 
-               if (get_ioapic_devid(id) < 0) {
-                       pr_err(FW_BUG "AMD-Vi: IO-APIC[%d] not in IVRS table\n", id);
-                       pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug\n");
-                       return false;
+       for (idx = 0; idx < nr_ioapics; idx++) {
+               int devid, id = mpc_ioapic_id(idx);
+
+               devid = get_ioapic_devid(id);
+               if (devid < 0) {
+                       pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
+                       ret = false;
+               } else if (devid == IOAPIC_SB_DEVID) {
+                       has_sb_ioapic = true;
+                       ret           = true;
                }
        }
 
-       return true;
+       if (!has_sb_ioapic) {
+               /*
+                * We expect the SB IOAPIC to be listed in the IVRS
+                * table. The system timer is connected to the SB IOAPIC
+                * and if we don't have it in the list the system will
+                * panic at boot time.  This situation usually happens
+                * when the BIOS is buggy and provides us the wrong
+                * device id for the IOAPIC in the system.
+                */
+               pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
+       }
+
+       if (!ret)
+               pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
+
+       return ret;
 }
 
 static void __init free_dma_resources(void)
index 0b4d62e0c64573cea96dc38f237436a036aa74cf..a649f146d17bad0b62d15a1d174c57147a9a2a2f 100644 (file)
@@ -200,7 +200,7 @@ enum {
 
 #define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
 #define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
-#define SMMU_PDN_TO_ADDR(addr) ((pdn) << 22)
+#define SMMU_PDN_TO_ADDR(pdn)  ((pdn) << 22)
 
 #define _READABLE      (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
 #define _WRITABLE      (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
index 0f1ec9e8ff14b02448877cb4f4872f45baeeccd3..2e39c04fc16bb4be31dba8800477aafed2e64441 100644 (file)
@@ -1061,8 +1061,10 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
                        seq_printf(s, "group: %s\n", gname);
                        for (i = 0; i < num_pins; i++) {
                                pname = pin_get_name(pctldev, pins[i]);
-                               if (WARN_ON(!pname))
+                               if (WARN_ON(!pname)) {
+                                       mutex_unlock(&pinctrl_mutex);
                                        return -EINVAL;
+                               }
                                seq_printf(s, "pin %d (%s)\n", pins[i], pname);
                        }
                        seq_puts(s, "\n");
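
The core.c hunk above is a lock-balance fix: the debugfs show routine returned early on a WARN_ON() while pinctrl_mutex was still held. A minimal sketch of the same unlock-on-every-exit pattern, using hypothetical names (items_mutex, nr_items, item_name) rather than the real pinctrl symbols:

	static int show_items(struct seq_file *s, void *unused)
	{
		int i;

		mutex_lock(&items_mutex);
		for (i = 0; i < nr_items; i++) {
			const char *name = item_name(i);

			if (WARN_ON(!name)) {
				/* release the mutex on the error exit, not only the normal one */
				mutex_unlock(&items_mutex);
				return -EINVAL;
			}
			seq_printf(s, "item %d (%s)\n", i, name);
		}
		mutex_unlock(&items_mutex);

		return 0;
	}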
index 43f474cdc11082717c881b757c6bcaf104bcbb97..baee2cc46a17a7c5c328f98c7eaa25f8f10d8fba 100644 (file)
@@ -537,8 +537,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
        seq_puts(s, "Pin config settings per pin group\n");
        seq_puts(s, "Format: group (name): configs\n");
 
-       mutex_lock(&pinctrl_mutex);
-
        while (selector < ngroups) {
                const char *gname = pctlops->get_group_name(pctldev, selector);
 
@@ -549,8 +547,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
                selector++;
        }
 
-       mutex_unlock(&pinctrl_mutex);
-
        return 0;
 }
 
index 01aea1c3b5fa3188a50cfe4e41ea13e48c50f56d..cf82d9ce4deeccc8c19c6104103fee84854c945a 100644 (file)
@@ -1056,7 +1056,7 @@ static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
        struct nmk_gpio_chip *nmk_chip =
                container_of(chip, struct nmk_gpio_chip, chip);
 
-       return irq_find_mapping(nmk_chip->domain, offset);
+       return irq_create_mapping(nmk_chip->domain, offset);
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -1281,7 +1281,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
        struct clk *clk;
        int secondary_irq;
        void __iomem *base;
-       int irq_start = -1;
+       int irq_start = 0;
        int irq;
        int ret;
 
@@ -1387,7 +1387,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
 
        if (!np)
                irq_start = NOMADIK_GPIO_TO_IRQ(pdata->first_gpio);
-       nmk_chip->domain = irq_domain_add_simple(NULL,
+       nmk_chip->domain = irq_domain_add_simple(np,
                                NMK_GPIO_PER_CHIP, irq_start,
                                &nmk_gpio_irq_simple_ops, nmk_chip);
        if (!nmk_chip->domain) {
index 729b686c3ad2338e2d4878ed0e1623674ba87f3f..7da0b371fd652a04ac7d55ecf89c55c915d19f8b 100644 (file)
@@ -464,7 +464,7 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
                *bank = g->drv_bank;
                *reg = g->drv_reg;
                *bit = g->lpmd_bit;
-               *width = 1;
+               *width = 2;
                break;
        case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
                *bank = g->drv_bank;
index 0386fdf0da16441ecc874b1710e55a929d44325a..7894f14c70590058726a6c07da4286e1640058db 100644 (file)
@@ -3345,10 +3345,10 @@ static const struct tegra_function tegra30_functions[] = {
        FUNCTION(vi_alt3),
 };
 
-#define MUXCTL_REG_A   0x3000
-#define PINGROUP_REG_A 0x868
+#define DRV_PINGROUP_REG_A     0x868   /* bank 0 */
+#define PINGROUP_REG_A         0x3000  /* bank 1 */
 
-#define PINGROUP_REG_Y(r) ((r) - MUXCTL_REG_A)
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
 #define PINGROUP_REG_N(r) -1
 
 #define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior)  \
@@ -3364,25 +3364,25 @@ static const struct tegra_function tegra30_functions[] = {
                },                                              \
                .func_safe = TEGRA_MUX_ ## f_safe,              \
                .mux_reg = PINGROUP_REG_Y(r),                   \
-               .mux_bank = 0,                                  \
+               .mux_bank = 1,                                  \
                .mux_bit = 0,                                   \
                .pupd_reg = PINGROUP_REG_Y(r),                  \
-               .pupd_bank = 0,                                 \
+               .pupd_bank = 1,                                 \
                .pupd_bit = 2,                                  \
                .tri_reg = PINGROUP_REG_Y(r),                   \
-               .tri_bank = 0,                                  \
+               .tri_bank = 1,                                  \
                .tri_bit = 4,                                   \
                .einput_reg = PINGROUP_REG_Y(r),                \
-               .einput_bank = 0,                               \
+               .einput_bank = 1,                               \
                .einput_bit = 5,                                \
                .odrain_reg = PINGROUP_REG_##od(r),             \
-               .odrain_bank = 0,                               \
+               .odrain_bank = 1,                               \
                .odrain_bit = 6,                                \
                .lock_reg = PINGROUP_REG_Y(r),                  \
-               .lock_bank = 0,                                 \
+               .lock_bank = 1,                                 \
                .lock_bit = 7,                                  \
                .ioreset_reg = PINGROUP_REG_##ior(r),           \
-               .ioreset_bank = 0,                              \
+               .ioreset_bank = 1,                              \
                .ioreset_bit = 8,                               \
                .drv_reg = -1,                                  \
        }
@@ -3401,8 +3401,8 @@ static const struct tegra_function tegra30_functions[] = {
                .odrain_reg = -1,                               \
                .lock_reg = -1,                                 \
                .ioreset_reg = -1,                              \
-               .drv_reg = ((r) - PINGROUP_REG_A),              \
-               .drv_bank = 1,                                  \
+               .drv_reg = ((r) - DRV_PINGROUP_REG_A),          \
+               .drv_bank = 0,                                  \
                .hsm_bit = hsm_b,                               \
                .schmitt_bit = schmitt_b,                       \
                .lpmd_bit = lpmd_b,                             \
index 891cd6c61d0ae76112ccc0f6b6da3876d21569ad..4eed51044c5dd331f576fc1b1b71655e5f932366 100644 (file)
@@ -392,6 +392,8 @@ static int dryice_rtc_probe(struct platform_device *pdev)
        if (imxdi->ioaddr == NULL)
                return -ENOMEM;
 
+       spin_lock_init(&imxdi->irq_lock);
+
        imxdi->irq = platform_get_irq(pdev, 0);
        if (imxdi->irq < 0)
                return imxdi->irq;
index 919464102d33afb2d493ace07c81c80403d1d51d..a1db91a99b89a670cb7979c5fd758c90796aa2a9 100644 (file)
@@ -2186,8 +2186,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
        printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
               adev->res.start, pl022->virtbase);
 
-       pm_runtime_resume(dev);
-
        pl022->clk = devm_clk_get(&adev->dev, NULL);
        if (IS_ERR(pl022->clk)) {
                status = PTR_ERR(pl022->clk);
@@ -2292,7 +2290,6 @@ pl022_remove(struct amba_device *adev)
 
        clk_disable(pl022->clk);
        clk_unprepare(pl022->clk);
-       pm_runtime_disable(&adev->dev);
        amba_release_regions(adev);
        tasklet_disable(&pl022->pump_transfers);
        spi_unregister_master(pl022->master);
index 4894bde4bbffea3f65d7ff30db4277c92ab79587..30faf6d4ab9135c84ecd8f359626ddd75e896b60 100644 (file)
@@ -147,8 +147,6 @@ struct rspi_data {
        unsigned char spsr;
 
        /* for dmaengine */
-       struct sh_dmae_slave dma_tx;
-       struct sh_dmae_slave dma_rx;
        struct dma_chan *chan_tx;
        struct dma_chan *chan_rx;
        int irq;
@@ -663,20 +661,16 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
        return ret;
 }
 
-static bool rspi_filter(struct dma_chan *chan, void *filter_param)
-{
-       chan->private = filter_param;
-       return true;
-}
-
-static void __devinit rspi_request_dma(struct rspi_data *rspi,
-                                      struct platform_device *pdev)
+static int __devinit rspi_request_dma(struct rspi_data *rspi,
+                                     struct platform_device *pdev)
 {
        struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
        dma_cap_mask_t mask;
+       struct dma_slave_config cfg;
+       int ret;
 
        if (!rspi_pd)
-               return;
+               return 0;       /* The driver assumes no error. */
 
        rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
 
@@ -684,21 +678,35 @@ static void __devinit rspi_request_dma(struct rspi_data *rspi,
        if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);
-               rspi->dma_rx.slave_id = rspi_pd->dma_rx_id;
-               rspi->chan_rx = dma_request_channel(mask, rspi_filter,
-                                                   &rspi->dma_rx);
-               if (rspi->chan_rx)
-                       dev_info(&pdev->dev, "Use DMA when rx.\n");
+               rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
+                                                   (void *)rspi_pd->dma_rx_id);
+               if (rspi->chan_rx) {
+                       cfg.slave_id = rspi_pd->dma_rx_id;
+                       cfg.direction = DMA_DEV_TO_MEM;
+                       ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
+                       if (!ret)
+                               dev_info(&pdev->dev, "Use DMA when rx.\n");
+                       else
+                               return ret;
+               }
        }
        if (rspi_pd->dma_tx_id) {
                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);
-               rspi->dma_tx.slave_id = rspi_pd->dma_tx_id;
-               rspi->chan_tx = dma_request_channel(mask, rspi_filter,
-                                                   &rspi->dma_tx);
-               if (rspi->chan_tx)
-                       dev_info(&pdev->dev, "Use DMA when tx\n");
+               rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
+                                                   (void *)rspi_pd->dma_tx_id);
+               if (rspi->chan_tx) {
+                       cfg.slave_id = rspi_pd->dma_tx_id;
+                       cfg.direction = DMA_MEM_TO_DEV;
+                       ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
+                       if (!ret)
+                               dev_info(&pdev->dev, "Use DMA when tx\n");
+                       else
+                               return ret;
+               }
        }
+
+       return 0;
 }
 
 static void __devexit rspi_release_dma(struct rspi_data *rspi)
@@ -788,7 +796,11 @@ static int __devinit rspi_probe(struct platform_device *pdev)
        }
 
        rspi->irq = irq;
-       rspi_request_dma(rspi, pdev);
+       ret = rspi_request_dma(rspi, pdev);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "rspi_request_dma failed.\n");
+               goto error4;
+       }
 
        ret = spi_register_master(master);
        if (ret < 0) {
index c101697a4ba708e06b2ea102ba6000d97eaa0ed8..765a945f8ea12ef2e1a9fc80c428a885f9806320 100644 (file)
@@ -60,7 +60,8 @@ config LCD_LTV350QV
          The LTV350QV panel is present on all ATSTK1000 boards.
 
 config LCD_ILI9320
-       tristate
+       tristate "ILI Technology ILI9320 controller support"
+       depends on SPI
        help
          If you have a panel based on the ILI9320 controller chip
          then say y to include a power driver for it.
index d4dffcd528730330a0d4cff238dcf0b56e3f318b..126d8ce591ce5fc7e69348f746e91e60b04bc4ee 100644 (file)
@@ -3,6 +3,7 @@ menu "Xen driver support"
 
 config XEN_BALLOON
        bool "Xen memory balloon driver"
+       depends on !ARM
        default y
        help
          The balloon driver allows the Xen domain to request more memory from
@@ -145,6 +146,7 @@ config SWIOTLB_XEN
 
 config XEN_TMEM
        bool
+       depends on !ARM
        default y if (CLEANCACHE || FRONTSWAP)
        help
          Shim to interface in-kernel Transcendent Memory hooks
index f3187938e081c7dcbf842d424f5de6ed06d3f93f..208d8aa5b07e488f1f39cb0877ff46bb5d08d5a6 100644 (file)
@@ -283,9 +283,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                goto out;
        }
 
-       rcu_read_lock();
-       root_level = btrfs_header_level(root->node);
-       rcu_read_unlock();
+       root_level = btrfs_old_root_level(root, time_seq);
 
        if (root_level + 1 == level)
                goto out;
@@ -1177,16 +1175,15 @@ int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
        return ret;
 }
 
-static char *ref_to_path(struct btrfs_root *fs_root,
-                        struct btrfs_path *path,
-                        u32 name_len, unsigned long name_off,
-                        struct extent_buffer *eb_in, u64 parent,
-                        char *dest, u32 size)
+char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+                       u32 name_len, unsigned long name_off,
+                       struct extent_buffer *eb_in, u64 parent,
+                       char *dest, u32 size)
 {
        int slot;
        u64 next_inum;
        int ret;
-       s64 bytes_left = size - 1;
+       s64 bytes_left = ((s64)size) - 1;
        struct extent_buffer *eb = eb_in;
        struct btrfs_key found_key;
        int leave_spinning = path->leave_spinning;
@@ -1266,10 +1263,10 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root,
                         struct extent_buffer *eb_in, u64 parent,
                         char *dest, u32 size)
 {
-       return ref_to_path(fs_root, path,
-                          btrfs_inode_ref_name_len(eb_in, iref),
-                          (unsigned long)(iref + 1),
-                          eb_in, parent, dest, size);
+       return btrfs_ref_to_path(fs_root, path,
+                                btrfs_inode_ref_name_len(eb_in, iref),
+                                (unsigned long)(iref + 1),
+                                eb_in, parent, dest, size);
 }
 
 /*
@@ -1715,9 +1712,8 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
                                        ipath->fspath->bytes_left - s_ptr : 0;
 
        fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
-       fspath = ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
-                            name_off, eb, inum, fspath_min,
-                            bytes_left);
+       fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
+                                  name_off, eb, inum, fspath_min, bytes_left);
        if (IS_ERR(fspath))
                return PTR_ERR(fspath);
 
index e75533043a5ffbab21ff133877c352b743ef6592..d61feca79455bda94308c9ab3608b23409a84c73 100644 (file)
@@ -62,6 +62,10 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
                         struct btrfs_inode_ref *iref, struct extent_buffer *eb,
                         u64 parent, char *dest, u32 size);
+char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+                       u32 name_len, unsigned long name_off,
+                       struct extent_buffer *eb_in, u64 parent,
+                       char *dest, u32 size);
 
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
index b334362110003165a72b63433b192f2b481c3b01..cdfb4c49a806ad4ba0ebe83b5569a4121a10a220 100644 (file)
@@ -596,6 +596,11 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
        if (tree_mod_dont_log(fs_info, eb))
                return 0;
 
+       /*
+        * When we override something during the move, we log these removals.
+        * This can only happen when we move towards the beginning of the
+        * buffer, i.e. dst_slot < src_slot.
+        */
        for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
                ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
                                              MOD_LOG_KEY_REMOVE_WHILE_MOVING);
@@ -647,8 +652,6 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
        if (tree_mod_dont_log(fs_info, NULL))
                return 0;
 
-       __tree_mod_log_free_eb(fs_info, old_root);
-
        ret = tree_mod_alloc(fs_info, flags, &tm);
        if (ret < 0)
                goto out;
@@ -926,12 +929,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                        ret = btrfs_dec_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret); /* -ENOMEM */
                }
-               /*
-                * don't log freeing in case we're freeing the root node, this
-                * is done by tree_mod_log_set_root_pointer later
-                */
-               if (buf != root->node && btrfs_header_level(buf) != 0)
-                       tree_mod_log_free_eb(root->fs_info, buf);
+               tree_mod_log_free_eb(root->fs_info, buf);
                clean_tree_block(trans, root, buf);
                *last_ref = 1;
        }
@@ -1225,6 +1223,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
        free_extent_buffer(eb);
 
        __tree_mod_log_rewind(eb_rewin, time_seq, tm);
+       WARN_ON(btrfs_header_nritems(eb_rewin) >
+               BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
 
        return eb_rewin;
 }
@@ -1241,9 +1241,11 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 {
        struct tree_mod_elem *tm;
        struct extent_buffer *eb;
+       struct extent_buffer *old;
        struct tree_mod_root *old_root = NULL;
        u64 old_generation = 0;
        u64 logical;
+       u32 blocksize;
 
        eb = btrfs_read_lock_root_node(root);
        tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
@@ -1259,14 +1261,32 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        }
 
        tm = tree_mod_log_search(root->fs_info, logical, time_seq);
-       if (old_root)
+       if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
+               btrfs_tree_read_unlock(root->node);
+               free_extent_buffer(root->node);
+               blocksize = btrfs_level_size(root, old_root->level);
+               old = read_tree_block(root, logical, blocksize, 0);
+               if (!old) {
+                       pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
+                               logical);
+                       WARN_ON(1);
+               } else {
+                       eb = btrfs_clone_extent_buffer(old);
+                       free_extent_buffer(old);
+               }
+       } else if (old_root) {
+               btrfs_tree_read_unlock(root->node);
+               free_extent_buffer(root->node);
                eb = alloc_dummy_extent_buffer(logical, root->nodesize);
-       else
+       } else {
                eb = btrfs_clone_extent_buffer(root->node);
-       btrfs_tree_read_unlock(root->node);
-       free_extent_buffer(root->node);
+               btrfs_tree_read_unlock(root->node);
+               free_extent_buffer(root->node);
+       }
+
        if (!eb)
                return NULL;
+       extent_buffer_get(eb);
        btrfs_tree_read_lock(eb);
        if (old_root) {
                btrfs_set_header_bytenr(eb, eb->start);
@@ -1279,11 +1299,28 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
                __tree_mod_log_rewind(eb, time_seq, tm);
        else
                WARN_ON(btrfs_header_level(eb) != 0);
-       extent_buffer_get(eb);
+       WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
 
        return eb;
 }
 
+int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
+{
+       struct tree_mod_elem *tm;
+       int level;
+
+       tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+       if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
+               level = tm->old_root.level;
+       } else {
+               rcu_read_lock();
+               level = btrfs_header_level(root->node);
+               rcu_read_unlock();
+       }
+
+       return level;
+}
+
 static inline int should_cow_block(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct extent_buffer *buf)
@@ -1725,6 +1762,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        goto enospc;
                }
 
+               tree_mod_log_free_eb(root->fs_info, root->node);
                tree_mod_log_set_root_pointer(root, child);
                rcu_assign_pointer(root->node, child);
 
@@ -2970,8 +3008,10 @@ static int push_node_left(struct btrfs_trans_handle *trans,
                           push_items * sizeof(struct btrfs_key_ptr));
 
        if (push_items < src_nritems) {
-               tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
-                                    src_nritems - push_items);
+               /*
+                * don't call tree_mod_log_eb_move here, key removal was already
+                * fully logged by tree_mod_log_eb_copy above.
+                */
                memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
                                      btrfs_node_key_ptr_offset(push_items),
                                      (src_nritems - push_items) *
index 926c9ffc66d93324d155481c4ecba13d27fa3fec..c72ead869507412ac9939c0748a482035baaf2aa 100644 (file)
@@ -3120,6 +3120,7 @@ static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
 {
        return atomic_inc_return(&fs_info->tree_mod_seq);
 }
+int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
 
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
@@ -3338,6 +3339,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 int btrfs_update_inode(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct inode *inode);
+int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root, struct inode *inode);
 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
 int btrfs_orphan_cleanup(struct btrfs_root *root);
index 8036d3a848530daed167453ebfb68eb8a83950f3..472873a94d969a86967e832eac2d452f274f11f9 100644 (file)
@@ -4110,8 +4110,8 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
 
        return eb;
 err:
-       for (i--; i >= 0; i--)
-               __free_page(eb->pages[i]);
+       for (; i > 0; i--)
+               __free_page(eb->pages[i - 1]);
        __free_extent_buffer(eb);
        return NULL;
 }
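
The extent_io.c hunk rewrites the unwind loop into a form that stays correct even when the loop index is an unsigned type: counting down with for (i--; i >= 0; i--) never terminates for an unsigned i, because the decrement past zero wraps around. A minimal sketch of the safe shape for releasing a partially-allocated page array (hypothetical variables, not the btrfs code):

	/* allocate n pages; on failure release only the ones already obtained */
	for (i = 0; i < n; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto err;
	}
	return 0;

	err:
	for (; i > 0; i--)
		__free_page(pages[i - 1]);
	return -ENOMEM;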
index 85a1e5053fe63a9d8df6682da38198883e00bb22..95542a1b3dfc99632219310f0108788789247fc9 100644 (file)
@@ -94,8 +94,6 @@ static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);
-static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root, struct inode *inode);
 
 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode,  struct inode *dir,
@@ -2746,8 +2744,9 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
        return btrfs_update_inode_item(trans, root, inode);
 }
 
-static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root, struct inode *inode)
+noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+                                        struct btrfs_root *root,
+                                        struct inode *inode)
 {
        int ret;
 
index 61168805f175b3c50ffef99b87e97fdeb2643ebd..8fcf9a59c28d08c5c8a6c65e34844e321ca2b66c 100644 (file)
@@ -343,7 +343,8 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
                return -EOPNOTSUPP;
        if (copy_from_user(&range, arg, sizeof(range)))
                return -EFAULT;
-       if (range.start > total_bytes)
+       if (range.start > total_bytes ||
+           range.len < fs_info->sb->s_blocksize)
                return -EINVAL;
 
        range.len = min(range.len, total_bytes - range.start);
@@ -570,7 +571,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
                ret = btrfs_commit_transaction(trans,
                                               root->fs_info->extent_root);
        }
-       BUG_ON(ret);
+       if (ret)
+               goto fail;
 
        ret = pending_snapshot->error;
        if (ret)
index 5039686df6ae8e801ed8985eb5e821a226e18855..fe9d02c45f8e521f87b44d6deff3e5f8d99aa3a9 100644 (file)
@@ -790,8 +790,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
        }
 
        path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
+       if (!path) {
+               ret = -ENOMEM;
+               goto out_free_root;
+       }
 
        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
@@ -800,7 +802,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*ptr));
        if (ret)
-               goto out;
+               goto out_free_path;
 
        leaf = path->nodes[0];
        ptr = btrfs_item_ptr(leaf, path->slots[0],
@@ -818,8 +820,15 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
        fs_info->quota_root = quota_root;
        fs_info->pending_quota_state = 1;
        spin_unlock(&fs_info->qgroup_lock);
-out:
+out_free_path:
        btrfs_free_path(path);
+out_free_root:
+       if (ret) {
+               free_extent_buffer(quota_root->node);
+               free_extent_buffer(quota_root->commit_root);
+               kfree(quota_root);
+       }
+out:
        return ret;
 }
 
index c7beb543a4a89300f1e586492b767ea8f9bef683..e78b297b0b00cc990e8eb7a3f1f34619638e5ef4 100644 (file)
@@ -745,31 +745,36 @@ typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
                                   void *ctx);
 
 /*
- * Helper function to iterate the entries in ONE btrfs_inode_ref.
+ * Helper function to iterate the entries in ONE btrfs_inode_ref or
+ * btrfs_inode_extref.
  * The iterate callback may return a non zero value to stop iteration. This can
  * be a negative value for error codes or 1 to simply stop it.
  *
- * path must point to the INODE_REF when called.
+ * path must point to the INODE_REF or INODE_EXTREF when called.
  */
 static int iterate_inode_ref(struct send_ctx *sctx,
                             struct btrfs_root *root, struct btrfs_path *path,
                             struct btrfs_key *found_key, int resolve,
                             iterate_inode_ref_t iterate, void *ctx)
 {
-       struct extent_buffer *eb;
+       struct extent_buffer *eb = path->nodes[0];
        struct btrfs_item *item;
        struct btrfs_inode_ref *iref;
+       struct btrfs_inode_extref *extref;
        struct btrfs_path *tmp_path;
        struct fs_path *p;
-       u32 cur;
-       u32 len;
+       u32 cur = 0;
        u32 total;
-       int slot;
+       int slot = path->slots[0];
        u32 name_len;
        char *start;
        int ret = 0;
-       int num;
+       int num = 0;
        int index;
+       u64 dir;
+       unsigned long name_off;
+       unsigned long elem_size;
+       unsigned long ptr;
 
        p = fs_path_alloc_reversed(sctx);
        if (!p)
@@ -781,24 +786,40 @@ static int iterate_inode_ref(struct send_ctx *sctx,
                return -ENOMEM;
        }
 
-       eb = path->nodes[0];
-       slot = path->slots[0];
-       item = btrfs_item_nr(eb, slot);
-       iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
-       cur = 0;
-       len = 0;
-       total = btrfs_item_size(eb, item);
 
-       num = 0;
+       if (found_key->type == BTRFS_INODE_REF_KEY) {
+               ptr = (unsigned long)btrfs_item_ptr(eb, slot,
+                                                   struct btrfs_inode_ref);
+               item = btrfs_item_nr(eb, slot);
+               total = btrfs_item_size(eb, item);
+               elem_size = sizeof(*iref);
+       } else {
+               ptr = btrfs_item_ptr_offset(eb, slot);
+               total = btrfs_item_size_nr(eb, slot);
+               elem_size = sizeof(*extref);
+       }
+
        while (cur < total) {
                fs_path_reset(p);
 
-               name_len = btrfs_inode_ref_name_len(eb, iref);
-               index = btrfs_inode_ref_index(eb, iref);
+               if (found_key->type == BTRFS_INODE_REF_KEY) {
+                       iref = (struct btrfs_inode_ref *)(ptr + cur);
+                       name_len = btrfs_inode_ref_name_len(eb, iref);
+                       name_off = (unsigned long)(iref + 1);
+                       index = btrfs_inode_ref_index(eb, iref);
+                       dir = found_key->offset;
+               } else {
+                       extref = (struct btrfs_inode_extref *)(ptr + cur);
+                       name_len = btrfs_inode_extref_name_len(eb, extref);
+                       name_off = (unsigned long)&extref->name;
+                       index = btrfs_inode_extref_index(eb, extref);
+                       dir = btrfs_inode_extref_parent(eb, extref);
+               }
+
                if (resolve) {
-                       start = btrfs_iref_to_path(root, tmp_path, iref, eb,
-                                               found_key->offset, p->buf,
-                                               p->buf_len);
+                       start = btrfs_ref_to_path(root, tmp_path, name_len,
+                                                 name_off, eb, dir,
+                                                 p->buf, p->buf_len);
                        if (IS_ERR(start)) {
                                ret = PTR_ERR(start);
                                goto out;
@@ -809,9 +830,10 @@ static int iterate_inode_ref(struct send_ctx *sctx,
                                                p->buf_len + p->buf - start);
                                if (ret < 0)
                                        goto out;
-                               start = btrfs_iref_to_path(root, tmp_path, iref,
-                                               eb, found_key->offset, p->buf,
-                                               p->buf_len);
+                               start = btrfs_ref_to_path(root, tmp_path,
+                                                         name_len, name_off,
+                                                         eb, dir,
+                                                         p->buf, p->buf_len);
                                if (IS_ERR(start)) {
                                        ret = PTR_ERR(start);
                                        goto out;
@@ -820,21 +842,16 @@ static int iterate_inode_ref(struct send_ctx *sctx,
                        }
                        p->start = start;
                } else {
-                       ret = fs_path_add_from_extent_buffer(p, eb,
-                                       (unsigned long)(iref + 1), name_len);
+                       ret = fs_path_add_from_extent_buffer(p, eb, name_off,
+                                                            name_len);
                        if (ret < 0)
                                goto out;
                }
 
-
-               len = sizeof(*iref) + name_len;
-               iref = (struct btrfs_inode_ref *)((char *)iref + len);
-               cur += len;
-
-               ret = iterate(num, found_key->offset, index, p, ctx);
+               cur += elem_size + name_len;
+               ret = iterate(num, dir, index, p, ctx);
                if (ret)
                        goto out;
-
                num++;
        }
 
@@ -998,7 +1015,8 @@ static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
        }
        btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
        if (found_key.objectid != ino ||
-               found_key.type != BTRFS_INODE_REF_KEY) {
+           (found_key.type != BTRFS_INODE_REF_KEY &&
+            found_key.type != BTRFS_INODE_EXTREF_KEY)) {
                ret = -ENOENT;
                goto out;
        }
@@ -1551,8 +1569,8 @@ static int get_first_ref(struct send_ctx *sctx,
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;
-       struct btrfs_inode_ref *iref;
        int len;
+       u64 parent_dir;
 
        path = alloc_path_for_send();
        if (!path)
@@ -1568,27 +1586,41 @@ static int get_first_ref(struct send_ctx *sctx,
        if (!ret)
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                path->slots[0]);
-       if (ret || found_key.objectid != key.objectid ||
-           found_key.type != key.type) {
+       if (ret || found_key.objectid != ino ||
+           (found_key.type != BTRFS_INODE_REF_KEY &&
+            found_key.type != BTRFS_INODE_EXTREF_KEY)) {
                ret = -ENOENT;
                goto out;
        }
 
-       iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
-                       struct btrfs_inode_ref);
-       len = btrfs_inode_ref_name_len(path->nodes[0], iref);
-       ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
-                       (unsigned long)(iref + 1), len);
+       if (key.type == BTRFS_INODE_REF_KEY) {
+               struct btrfs_inode_ref *iref;
+               iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                     struct btrfs_inode_ref);
+               len = btrfs_inode_ref_name_len(path->nodes[0], iref);
+               ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+                                                    (unsigned long)(iref + 1),
+                                                    len);
+               parent_dir = found_key.offset;
+       } else {
+               struct btrfs_inode_extref *extref;
+               extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                       struct btrfs_inode_extref);
+               len = btrfs_inode_extref_name_len(path->nodes[0], extref);
+               ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+                                       (unsigned long)&extref->name, len);
+               parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
+       }
        if (ret < 0)
                goto out;
        btrfs_release_path(path);
 
-       ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL,
+       ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
                        NULL, NULL);
        if (ret < 0)
                goto out;
 
-       *dir = found_key.offset;
+       *dir = parent_dir;
 
 out:
        btrfs_free_path(path);
@@ -2430,7 +2462,8 @@ verbose_printk("btrfs: send_create_inode %llu\n", ino);
                TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
        } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
                   S_ISFIFO(mode) || S_ISSOCK(mode)) {
-               TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, rdev);
+               TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
+               TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
        }
 
        ret = send_cmd(sctx);
@@ -3226,7 +3259,8 @@ static int process_all_refs(struct send_ctx *sctx,
                btrfs_item_key_to_cpu(eb, &found_key, slot);
 
                if (found_key.objectid != key.objectid ||
-                   found_key.type != key.type)
+                   (found_key.type != BTRFS_INODE_REF_KEY &&
+                    found_key.type != BTRFS_INODE_EXTREF_KEY))
                        break;
 
                ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb,
@@ -3987,7 +4021,7 @@ static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
        if (sctx->cur_ino == 0)
                goto out;
        if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
-           sctx->cmp_key->type <= BTRFS_INODE_REF_KEY)
+           sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
                goto out;
        if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
                goto out;
@@ -4033,22 +4067,21 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
        if (ret < 0)
                goto out;
 
-       if (!S_ISLNK(sctx->cur_inode_mode)) {
-               if (!sctx->parent_root || sctx->cur_inode_new) {
+       if (!sctx->parent_root || sctx->cur_inode_new) {
+               need_chown = 1;
+               if (!S_ISLNK(sctx->cur_inode_mode))
                        need_chmod = 1;
-                       need_chown = 1;
-               } else {
-                       ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
-                                       NULL, NULL, &right_mode, &right_uid,
-                                       &right_gid, NULL);
-                       if (ret < 0)
-                               goto out;
+       } else {
+               ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
+                               NULL, NULL, &right_mode, &right_uid,
+                               &right_gid, NULL);
+               if (ret < 0)
+                       goto out;
 
-                       if (left_uid != right_uid || left_gid != right_gid)
-                               need_chown = 1;
-                       if (left_mode != right_mode)
-                               need_chmod = 1;
-               }
+               if (left_uid != right_uid || left_gid != right_gid)
+                       need_chown = 1;
+               if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
+                       need_chmod = 1;
        }
 
        if (S_ISREG(sctx->cur_inode_mode)) {
@@ -4335,7 +4368,8 @@ static int changed_cb(struct btrfs_root *left_root,
 
        if (key->type == BTRFS_INODE_ITEM_KEY)
                ret = changed_inode(sctx, result);
-       else if (key->type == BTRFS_INODE_REF_KEY)
+       else if (key->type == BTRFS_INODE_REF_KEY ||
+                key->type == BTRFS_INODE_EXTREF_KEY)
                ret = changed_ref(sctx, result);
        else if (key->type == BTRFS_XATTR_ITEM_KEY)
                ret = changed_xattr(sctx, result);
index 77db875b511638b7ff94854c6b1482942da2b3fb..04bbfb1052ebfee9db25427d5542e795cac351cd 100644 (file)
@@ -1200,7 +1200,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
-       ret = btrfs_update_inode(trans, parent_root, parent_inode);
+       ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
 fail:
index 029b903a4ae3797322e05090790b86c9e8596c43..0f5ebb72a5ea01693b339d66e3f928de9b783f78 100644 (file)
@@ -1819,6 +1819,13 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                                    "Failed to relocate sys chunks after "
                                    "device initialization. This can be fixed "
                                    "using the \"btrfs balance\" command.");
+               trans = btrfs_attach_transaction(root);
+               if (IS_ERR(trans)) {
+                       if (PTR_ERR(trans) == -ENOENT)
+                               return 0;
+                       return PTR_ERR(trans);
+               }
+               ret = btrfs_commit_transaction(trans, root);
        }
 
        return ret;
index f5054025f9da7c154b067db092cc44f8b953f835..4c6285fff598e1faf9bb471f8b3b5593db74b355 100644 (file)
@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
 
        err  = get_user(palp, &up->palette);
        err |= get_user(length, &up->length);
+       if (err)
+               return -EFAULT;
 
        up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
        err  = put_user(compat_ptr(palp), &up_native->palette);
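
With the added check, do_video_set_spu_palette() fails fast when either get_user() copy faults instead of passing indeterminate values on to the native structure. The general shape, with hypothetical fields a and b:

	err  = get_user(a, &up->a);
	err |= get_user(b, &up->b);     /* nonzero results accumulate */
	if (err)
		return -EFAULT;          /* a and b are unreliable after a fault */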
index e4fb3ba5a58a3d50eb11a0f9533ca58ae5150283..3d7e09bcc0e9efad7418803737e385c76d22a776 100644 (file)
@@ -85,29 +85,38 @@ static struct rpc_clnt *nsm_create(struct net *net)
        return rpc_create(&args);
 }
 
+static struct rpc_clnt *nsm_client_set(struct lockd_net *ln,
+               struct rpc_clnt *clnt)
+{
+       spin_lock(&ln->nsm_clnt_lock);
+       if (ln->nsm_users == 0) {
+               if (clnt == NULL)
+                       goto out;
+               ln->nsm_clnt = clnt;
+       }
+       clnt = ln->nsm_clnt;
+       ln->nsm_users++;
+out:
+       spin_unlock(&ln->nsm_clnt_lock);
+       return clnt;
+}
+
 static struct rpc_clnt *nsm_client_get(struct net *net)
 {
-       static DEFINE_MUTEX(nsm_create_mutex);
-       struct rpc_clnt *clnt;
+       struct rpc_clnt *clnt, *new;
        struct lockd_net *ln = net_generic(net, lockd_net_id);
 
-       spin_lock(&ln->nsm_clnt_lock);
-       if (ln->nsm_users) {
-               ln->nsm_users++;
-               clnt = ln->nsm_clnt;
-               spin_unlock(&ln->nsm_clnt_lock);
+       clnt = nsm_client_set(ln, NULL);
+       if (clnt != NULL)
                goto out;
-       }
-       spin_unlock(&ln->nsm_clnt_lock);
 
-       mutex_lock(&nsm_create_mutex);
-       clnt = nsm_create(net);
-       if (!IS_ERR(clnt)) {
-               ln->nsm_clnt = clnt;
-               smp_wmb();
-               ln->nsm_users = 1;
-       }
-       mutex_unlock(&nsm_create_mutex);
+       clnt = new = nsm_create(net);
+       if (IS_ERR(clnt))
+               goto out;
+
+       clnt = nsm_client_set(ln, new);
+       if (clnt != new)
+               rpc_shutdown_client(new);
 out:
        return clnt;
 }
@@ -115,18 +124,16 @@ out:
 static void nsm_client_put(struct net *net)
 {
        struct lockd_net *ln = net_generic(net, lockd_net_id);
-       struct rpc_clnt *clnt = ln->nsm_clnt;
-       int shutdown = 0;
+       struct rpc_clnt *clnt = NULL;
 
        spin_lock(&ln->nsm_clnt_lock);
-       if (ln->nsm_users) {
-               if (--ln->nsm_users)
-                       ln->nsm_clnt = NULL;
-               shutdown = !ln->nsm_users;
+       ln->nsm_users--;
+       if (ln->nsm_users == 0) {
+               clnt = ln->nsm_clnt;
+               ln->nsm_clnt = NULL;
        }
        spin_unlock(&ln->nsm_clnt_lock);
-
-       if (shutdown)
+       if (clnt != NULL)
                rpc_shutdown_client(clnt);
 }
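
The mon.c rework replaces the nsm_create_mutex serialization with an optimistic scheme: look for an already-installed client under the spinlock, create one outside any lock if none exists, then try to install it and throw ours away if another caller won the race. A minimal sketch of that install-or-discard pattern, hypothetical names throughout:

	spin_lock(&obj_lock);
	if (obj_users == 0)
		obj = candidate;         /* first user installs its candidate */
	obj_users++;
	winner = obj;
	spin_unlock(&obj_lock);

	if (winner != candidate)
		destroy(candidate);      /* lost the race: discard our copy */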
 
index c78bb997e2c60846a1f5e261664d96ee1656fe6d..af1cbaf535edeb395954583d1d1c1cd0c255c64c 100644 (file)
        {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
index 214caa33433b9a58d4635d1f3214b099c515c8e5..2ac60c9cf6448fac839074e752d61f2de7b56c0a 100644 (file)
@@ -24,6 +24,7 @@
 #ifndef _LINUX_RBTREE_AUGMENTED_H
 #define _LINUX_RBTREE_AUGMENTED_H
 
+#include <linux/compiler.h>
 #include <linux/rbtree.h>
 
 /*
index d9b0c84220c73897630f7cc7d35864c4eeeadb69..8f721e465e05b968ed1b5223c4b1e7eb61be78e2 100644 (file)
@@ -3,8 +3,6 @@
  *
  * Copyright (C) 2009-2010 Nokia Corporation
  *
- * Contact: Aaro Koskinen <aaro.koskinen@nokia.com>
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
index 0dfeca4324ee06dbb6566db568b9874758344232..86e3285ae7e5b48fd9b123291da2d8dbfe0d7460 100644 (file)
@@ -174,10 +174,8 @@ signing_key.priv signing_key.x509: x509.genkey
        @echo "###"
        @echo "### If this takes a long time, you might wish to run rngd in the"
        @echo "### background to keep the supply of entropy topped up.  It"
-       @echo "### needs to be run as root, and should use a hardware random"
-       @echo "### number generator if one is available, eg:"
-       @echo "###"
-       @echo "###     rngd -r /dev/hwrandom"
+       @echo "### needs to be run as root, and uses a hardware random"
+       @echo "### number generator if one is available."
        @echo "###"
        openssl req -new -nodes -utf8 $(sign_key_with_hash) -days 36500 -batch \
                -x509 -config x509.genkey \
index 13774b3b39aac9b73e25ba34a366d5402b70df00..f24f724620dd8489fc2e3cb9781433df84096c96 100644 (file)
@@ -1962,9 +1962,8 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
         * trading it for newcg is protected by cgroup_mutex, we're safe to drop
         * it here; it will be freed under RCU.
         */
-       put_css_set(oldcg);
-
        set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
+       put_css_set(oldcg);
 }
 
 /**
@@ -4815,31 +4814,20 @@ static const struct file_operations proc_cgroupstats_operations = {
  *
  * A pointer to the shared css_set was automatically copied in
  * fork.c by dup_task_struct().  However, we ignore that copy, since
- * it was not made under the protection of RCU, cgroup_mutex or
- * threadgroup_change_begin(), so it might no longer be a valid
- * cgroup pointer.  cgroup_attach_task() might have already changed
- * current->cgroups, allowing the previously referenced cgroup
- * group to be removed and freed.
- *
- * Outside the pointer validity we also need to process the css_set
- * inheritance between threadgoup_change_begin() and
- * threadgoup_change_end(), this way there is no leak in any process
- * wide migration performed by cgroup_attach_proc() that could otherwise
- * miss a thread because it is too early or too late in the fork stage.
+ * it was not made under the protection of RCU or cgroup_mutex, so
+ * might no longer be a valid cgroup pointer.  cgroup_attach_task() might
+ * have already changed current->cgroups, allowing the previously
+ * referenced cgroup group to be removed and freed.
  *
  * At the point that cgroup_fork() is called, 'current' is the parent
  * task, and the passed argument 'child' points to the child task.
  */
 void cgroup_fork(struct task_struct *child)
 {
-       /*
-        * We don't need to task_lock() current because current->cgroups
-        * can't be changed concurrently here. The parent obviously hasn't
-        * exited and called cgroup_exit(), and we are synchronized against
-        * cgroup migration through threadgroup_change_begin().
-        */
+       task_lock(current);
        child->cgroups = current->cgroups;
        get_css_set(child->cgroups);
+       task_unlock(current);
        INIT_LIST_HEAD(&child->cg_list);
 }
 
@@ -4895,19 +4883,10 @@ void cgroup_post_fork(struct task_struct *child)
         */
        if (use_task_css_set_links) {
                write_lock(&css_set_lock);
-               if (list_empty(&child->cg_list)) {
-                       /*
-                        * It's safe to use child->cgroups without task_lock()
-                        * here because we are protected through
-                        * threadgroup_change_begin() against concurrent
-                        * css_set change in cgroup_task_migrate(). Also
-                        * the task can't exit at that point until
-                        * wake_up_new_task() is called, so we are protected
-                        * against cgroup_exit() setting child->cgroup to
-                        * init_css_set.
-                        */
+               task_lock(child);
+               if (list_empty(&child->cg_list))
                        list_add(&child->cg_list, &child->cgroups->tasks);
-               }
+               task_unlock(child);
                write_unlock(&css_set_lock);
        }
 }
index eb00be205811959c708fe2bf5e3aee0547dcfa36..7b07cc0dfb75fb6b2f2f802178abf82565d9c1a4 100644 (file)
@@ -71,12 +71,22 @@ err_alloc:
        return NULL;
 }
 
+/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
+#define MAX_PID_NS_LEVEL 32
+
 static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
 {
        struct pid_namespace *ns;
        unsigned int level = parent_pid_ns->level + 1;
-       int i, err = -ENOMEM;
+       int i;
+       int err;
+
+       if (level > MAX_PID_NS_LEVEL) {
+               err = -EINVAL;
+               goto out;
+       }
 
+       err = -ENOMEM;
        ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
        if (ns == NULL)
                goto out;
index d951daa0ca9a81b21f6b1387b140f537b53c27c1..042d221d33cc1675fadf7ee291e86717f2c8f6c9 100644 (file)
@@ -2982,7 +2982,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
 
        set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
        local_irq_restore(flags);
-       return true;
+       return ret;
 }
 EXPORT_SYMBOL(cancel_delayed_work);
 
index ca208a92628c1b49a6f5c1fece1a81592a65ecaa..54920433705adbff385b319574a9f04b50d8ca77 100644 (file)
@@ -178,7 +178,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
-                               (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+                               BITS_TO_LONGS(nbits) * sizeof(long);
 
        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
        if (unlikely(chunk == NULL))
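
The genalloc change sizes the chunk bitmap in whole unsigned longs rather than bytes, since the kernel bitmap helpers always read and write full longs. Rounding to bytes can under-allocate the last word: assuming 64-bit longs, nbits = 65 gives (65 + 7) / 8 = 9 bytes, while BITS_TO_LONGS(65) = 2 longs = 16 bytes, so word-wise bitmap operations on the second long would run past a 9-byte allocation. A small worked check of the two formulas:

	#define BITS_PER_LONG 64
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	size_t by_bytes = (65 + 8 - 1) / 8;                 /* 9 bytes  */
	size_t by_longs = BITS_TO_LONGS(65) * sizeof(long); /* 16 bytes */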
index 479a1e751a73fb4781e1b856db60b2381909c9e6..8a5ac8c686b03480fae7d5b193a5c714c7c2397b 100644 (file)
@@ -196,28 +196,28 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
        /*
-       * Verify that mmu_notifier_init() already run and the global srcu is
-       * initialized.
-       */
+        * Verify that mmu_notifier_init() already run and the global srcu is
+        * initialized.
+        */
        BUG_ON(!srcu.per_cpu_ref);
 
+       ret = -ENOMEM;
+       mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+       if (unlikely(!mmu_notifier_mm))
+               goto out;
+
        if (take_mmap_sem)
                down_write(&mm->mmap_sem);
        ret = mm_take_all_locks(mm);
        if (unlikely(ret))
-               goto out;
+               goto out_clean;
 
        if (!mm_has_notifiers(mm)) {
-               mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
-                                       GFP_KERNEL);
-               if (unlikely(!mmu_notifier_mm)) {
-                       ret = -ENOMEM;
-                       goto out_of_mem;
-               }
                INIT_HLIST_HEAD(&mmu_notifier_mm->list);
                spin_lock_init(&mmu_notifier_mm->lock);
 
                mm->mmu_notifier_mm = mmu_notifier_mm;
+               mmu_notifier_mm = NULL;
        }
        atomic_inc(&mm->mm_count);
 
@@ -233,12 +233,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
        hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
        spin_unlock(&mm->mmu_notifier_mm->lock);
 
-out_of_mem:
        mm_drop_all_locks(mm);
-out:
+out_clean:
        if (take_mmap_sem)
                up_write(&mm->mmap_sem);
-
+       kfree(mmu_notifier_mm);
+out:
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
        return ret;
 }
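
The mmu_notifier.c rework moves the kmalloc() ahead of mm_take_all_locks(), so no allocation happens while the heavy locks are held and the failure path shrinks to a plain return. The shape, sketched with hypothetical names:

	new = kmalloc(sizeof(*new), GFP_KERNEL);   /* allocate before locking */
	if (!new)
		return -ENOMEM;

	mutex_lock(&heavy_lock);
	if (!shared) {
		shared = new;
		new = NULL;                        /* ownership handed over */
	}
	mutex_unlock(&heavy_lock);

	kfree(new);                                /* harmless if already consumed */
	return 0;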
index bb90971182bd8c833e40f508e3c8b8677d6913c8..5b74de6702e06587e0d4f36060f810526ff8fbe3 100644 (file)
@@ -1809,10 +1809,10 @@ static void __paginginit init_zone_allows_reclaim(int nid)
        int i;
 
        for_each_online_node(i)
-               if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+               if (node_distance(nid, i) <= RECLAIM_DISTANCE)
                        node_set(i, NODE_DATA(nid)->reclaim_nodes);
+               else
                        zone_reclaim_mode = 1;
-               }
 }
 
 #else  /* CONFIG_NUMA */
@@ -5825,7 +5825,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
        ret = start_isolate_page_range(pfn_max_align_down(start),
                                       pfn_max_align_up(end), migratetype);
        if (ret)
-               goto done;
+               return ret;
 
        ret = __alloc_contig_migrate_range(&cc, start, end);
        if (ret)
index 7df7984d476c8661b900725c0e2c3f33c883a32d..2ee1ef0f317b7487bfb21b7a6717b1e12d1f7ef4 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -56,6 +56,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/backing-dev.h>
 
 #include <asm/tlbflush.h>
 
@@ -926,11 +927,8 @@ int page_mkclean(struct page *page)
 
        if (page_mapped(page)) {
                struct address_space *mapping = page_mapping(page);
-               if (mapping) {
+               if (mapping)
                        ret = page_mkclean_file(mapping, page);
-                       if (page_test_and_clear_dirty(page_to_pfn(page), 1))
-                               ret = 1;
-               }
        }
 
        return ret;
@@ -1116,6 +1114,7 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+       struct address_space *mapping = page_mapping(page);
        bool anon = PageAnon(page);
        bool locked;
        unsigned long flags;
@@ -1138,8 +1137,19 @@ void page_remove_rmap(struct page *page)
         * this if the page is anon, so about to be freed; but perhaps
         * not if it's in swapcache - there might be another pte slot
         * containing the swap entry, but page not yet written to swap.
+        *
+        * And we can skip it on file pages, so long as the filesystem
+        * participates in dirty tracking; but need to catch shm and tmpfs
+        * and ramfs pages which have been modified since creation by read
+        * fault.
+        *
+        * Note that mapping must be decided above, before decrementing
+        * mapcount (which luckily provides a barrier): once page is unmapped,
+        * it could be truncated and page->mapping reset to NULL at any moment.
+        * Note also that we are relying on page_mapping(page) to set mapping
+        * to &swapper_space when PageSwapCache(page).
         */
-       if ((!anon || PageSwapCache(page)) &&
+       if (mapping && !mapping_cap_account_dirty(mapping) &&
            page_test_and_clear_dirty(page_to_pfn(page), 1))
                set_page_dirty(page);
        /*
index aaaadfbe36e9525a42a86e1f649bf66e59eb7274..75853cabf4c97b153873eda4ce67cd228581fd15 100644
@@ -254,7 +254,6 @@ struct sock_xprt {
        void                    (*old_data_ready)(struct sock *, int);
        void                    (*old_state_change)(struct sock *);
        void                    (*old_write_space)(struct sock *);
-       void                    (*old_error_report)(struct sock *);
 };
 
 /*
@@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
                dprintk("RPC:       sendmsg returned unrecognized error %d\n",
                        -status);
        case -ECONNRESET:
-       case -EPIPE:
                xs_tcp_shutdown(xprt);
        case -ECONNREFUSED:
        case -ENOTCONN:
+       case -EPIPE:
                clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
        }
 
@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
        transport->old_data_ready = sk->sk_data_ready;
        transport->old_state_change = sk->sk_state_change;
        transport->old_write_space = sk->sk_write_space;
-       transport->old_error_report = sk->sk_error_report;
 }
 
 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
        sk->sk_data_ready = transport->old_data_ready;
        sk->sk_state_change = transport->old_state_change;
        sk->sk_write_space = transport->old_write_space;
-       sk->sk_error_report = transport->old_error_report;
 }
 
 static void xs_reset_transport(struct sock_xprt *transport)
@@ -1453,7 +1450,7 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
        xprt_clear_connecting(xprt);
 }
 
-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
 {
        smp_mb__before_clear_bit();
        clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
@@ -1461,6 +1458,11 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        clear_bit(XPRT_CLOSING, &xprt->state);
        smp_mb__after_clear_bit();
+}
+
+static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+{
+       xs_sock_reset_connection_flags(xprt);
        /* Mark transport as closed and wake up all pending tasks */
        xprt_disconnect_done(xprt);
 }
@@ -1516,6 +1518,7 @@ static void xs_tcp_state_change(struct sock *sk)
        case TCP_CLOSE_WAIT:
                /* The server initiated a shutdown of the socket */
                xprt->connect_cookie++;
+               clear_bit(XPRT_CONNECTED, &xprt->state);
                xs_tcp_force_close(xprt);
        case TCP_CLOSING:
                /*
@@ -1540,25 +1543,6 @@ static void xs_tcp_state_change(struct sock *sk)
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
-/**
- * xs_error_report - callback mainly for catching socket errors
- * @sk: socket
- */
-static void xs_error_report(struct sock *sk)
-{
-       struct rpc_xprt *xprt;
-
-       read_lock_bh(&sk->sk_callback_lock);
-       if (!(xprt = xprt_from_sock(sk)))
-               goto out;
-       dprintk("RPC:       %s client %p...\n"
-                       "RPC:       error %d\n",
-                       __func__, xprt, sk->sk_err);
-       xprt_wake_pending_tasks(xprt, -EAGAIN);
-out:
-       read_unlock_bh(&sk->sk_callback_lock);
-}
-
 static void xs_write_space(struct sock *sk)
 {
        struct socket *sock;
@@ -1858,7 +1842,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
                sk->sk_user_data = xprt;
                sk->sk_data_ready = xs_local_data_ready;
                sk->sk_write_space = xs_udp_write_space;
-               sk->sk_error_report = xs_error_report;
                sk->sk_allocation = GFP_ATOMIC;
 
                xprt_clear_connected(xprt);
@@ -1983,7 +1966,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                sk->sk_user_data = xprt;
                sk->sk_data_ready = xs_udp_data_ready;
                sk->sk_write_space = xs_udp_write_space;
-               sk->sk_error_report = xs_error_report;
                sk->sk_no_check = UDP_CSUM_NORCV;
                sk->sk_allocation = GFP_ATOMIC;
 
@@ -2050,10 +2032,8 @@ static void xs_abort_connection(struct sock_xprt *transport)
        any.sa_family = AF_UNSPEC;
        result = kernel_connect(transport->sock, &any, sizeof(any), 0);
        if (!result)
-               xs_sock_mark_closed(&transport->xprt);
-       else
-               dprintk("RPC:       AF_UNSPEC connect return code %d\n",
-                               result);
+               xs_sock_reset_connection_flags(&transport->xprt);
+       dprintk("RPC:       AF_UNSPEC connect return code %d\n", result);
 }
 
 static void xs_tcp_reuse_connection(struct sock_xprt *transport)
@@ -2098,7 +2078,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                sk->sk_data_ready = xs_tcp_data_ready;
                sk->sk_state_change = xs_tcp_state_change;
                sk->sk_write_space = xs_tcp_write_space;
-               sk->sk_error_report = xs_error_report;
                sk->sk_allocation = GFP_ATOMIC;
 
                /* socket options */
index cf5fd220309be40d597ac7ca02e0d61658cd8eee..813200384d97cfc7f06a76e9b2f6286be7dfa7ab 100644
@@ -724,6 +724,8 @@ fail:
  */
 static void free_profile(struct aa_profile *profile)
 {
+       struct aa_profile *p;
+
        AA_DEBUG("%s(%p)\n", __func__, profile);
 
        if (!profile)
@@ -751,7 +753,27 @@ static void free_profile(struct aa_profile *profile)
        aa_put_dfa(profile->xmatch);
        aa_put_dfa(profile->policy.dfa);
 
-       aa_put_profile(profile->replacedby);
+       /* put the profile reference for replacedby, but not via
+        * put_profile(kref_put).
+        * replacedby can form a long chain that can result in cascading
+        * frees that blows the stack because kref_put makes a nested fn
+        * call (it looks like recursion, with free_profile calling
+        * free_profile) for each profile in the chain lp#1056078.
+        */
+       for (p = profile->replacedby; p; ) {
+               if (atomic_dec_and_test(&p->base.count.refcount)) {
+                       /* no more refs on p, grab its replacedby */
+                       struct aa_profile *next = p->replacedby;
+                       /* break the chain */
+                       p->replacedby = NULL;
+                       /* now free p, chain is broken */
+                       free_profile(p);
+
+                       /* follow up with next profile in the chain */
+                       p = next;
+               } else
+                       break;
+       }
 
        kzfree(profile);
 }
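
The loop above replaces a recursive kref_put() on ->replacedby with an
iteration, so an arbitrarily long replacedby chain can no longer blow the
kernel stack (lp#1056078). The same transformation applies to any singly
linked chain of refcounted objects; a simplified, lock-free sketch with
illustrative names:

	#include <stdlib.h>

	struct node {
		int refcount;		/* stand-in for the kref */
		struct node *next;	/* stand-in for ->replacedby */
	};

	static void put_chain(struct node *n)
	{
		/* Drop one reference per link and stop at the first link
		 * still referenced elsewhere; freeing happens inside the
		 * loop instead of recursing into the next link. */
		while (n && --n->refcount == 0) {
			struct node *next = n->next;

			n->next = NULL;	/* break the chain before freeing */
			free(n);
			n = next;
		}
	}
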
index 44dfc415a379afc9c327388664e949bfcc855bcb..842c254396dbe8ab2a54fd566b37d29ce6fd4d8b 100644
@@ -42,7 +42,10 @@ struct dev_exception_item {
 struct dev_cgroup {
        struct cgroup_subsys_state css;
        struct list_head exceptions;
-       bool deny_all;
+       enum {
+               DEVCG_DEFAULT_ALLOW,
+               DEVCG_DEFAULT_DENY,
+       } behavior;
 };
 
 static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
@@ -182,13 +185,13 @@ static struct cgroup_subsys_state *devcgroup_create(struct cgroup *cgroup)
        parent_cgroup = cgroup->parent;
 
        if (parent_cgroup == NULL)
-               dev_cgroup->deny_all = false;
+               dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
        else {
                parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
                mutex_lock(&devcgroup_mutex);
                ret = dev_exceptions_copy(&dev_cgroup->exceptions,
                                          &parent_dev_cgroup->exceptions);
-               dev_cgroup->deny_all = parent_dev_cgroup->deny_all;
+               dev_cgroup->behavior = parent_dev_cgroup->behavior;
                mutex_unlock(&devcgroup_mutex);
                if (ret) {
                        kfree(dev_cgroup);
@@ -260,7 +263,7 @@ static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
         * - List the exceptions in case the default policy is to deny
         * This way, the file remains as a "whitelist of devices"
         */
-       if (devcgroup->deny_all == false) {
+       if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                set_access(acc, ACC_MASK);
                set_majmin(maj, ~0);
                set_majmin(min, ~0);
@@ -314,12 +317,12 @@ static int may_access(struct dev_cgroup *dev_cgroup,
         * In two cases we'll consider this new exception valid:
         * - the dev cgroup has its default policy to allow + exception list:
         *   the new exception should *not* match any of the exceptions
-        *   (!deny_all, !match)
+        *   (behavior == DEVCG_DEFAULT_ALLOW, !match)
         * - the dev cgroup has its default policy to deny + exception list:
         *   the new exception *should* match the exceptions
-        *   (deny_all, match)
+        *   (behavior == DEVCG_DEFAULT_DENY, match)
         */
-       if (dev_cgroup->deny_all == match)
+       if ((dev_cgroup->behavior == DEVCG_DEFAULT_DENY) == match)
                return 1;
        return 0;
 }
@@ -341,6 +344,17 @@ static int parent_has_perm(struct dev_cgroup *childcg,
        return may_access(parent, ex);
 }
 
+/**
+ * may_allow_all - checks if it's possible to change the behavior to
+ *                allow based on parent's rules.
+ * @parent: device cgroup's parent
+ * returns: != 0 in case it's allowed, 0 otherwise
+ */
+static inline int may_allow_all(struct dev_cgroup *parent)
+{
+       return parent->behavior == DEVCG_DEFAULT_ALLOW;
+}
+
 /*
  * Modify the exception list using allow/deny rules.
  * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
@@ -358,9 +372,11 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
                                   int filetype, const char *buffer)
 {
        const char *b;
-       char *endp;
-       int count;
+       char temp[12];          /* 11 + 1 characters needed for a u32 */
+       int count, rc;
        struct dev_exception_item ex;
+       struct cgroup *p = devcgroup->css.cgroup;
+       struct dev_cgroup *parent = cgroup_to_devcgroup(p->parent);
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -372,14 +388,18 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
        case 'a':
                switch (filetype) {
                case DEVCG_ALLOW:
-                       if (!parent_has_perm(devcgroup, &ex))
+                       if (!may_allow_all(parent))
                                return -EPERM;
                        dev_exception_clean(devcgroup);
-                       devcgroup->deny_all = false;
+                       rc = dev_exceptions_copy(&devcgroup->exceptions,
+                                                &parent->exceptions);
+                       if (rc)
+                               return rc;
+                       devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
                        break;
                case DEVCG_DENY:
                        dev_exception_clean(devcgroup);
-                       devcgroup->deny_all = true;
+                       devcgroup->behavior = DEVCG_DEFAULT_DENY;
                        break;
                default:
                        return -EINVAL;
@@ -402,8 +422,16 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
                ex.major = ~0;
                b++;
        } else if (isdigit(*b)) {
-               ex.major = simple_strtoul(b, &endp, 10);
-               b = endp;
+               memset(temp, 0, sizeof(temp));
+               for (count = 0; count < sizeof(temp) - 1; count++) {
+                       temp[count] = *b;
+                       b++;
+                       if (!isdigit(*b))
+                               break;
+               }
+               rc = kstrtou32(temp, 10, &ex.major);
+               if (rc)
+                       return -EINVAL;
        } else {
                return -EINVAL;
        }
@@ -416,8 +444,16 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
                ex.minor = ~0;
                b++;
        } else if (isdigit(*b)) {
-               ex.minor = simple_strtoul(b, &endp, 10);
-               b = endp;
+               memset(temp, 0, sizeof(temp));
+               for (count = 0; count < sizeof(temp) - 1; count++) {
+                       temp[count] = *b;
+                       b++;
+                       if (!isdigit(*b))
+                               break;
+               }
+               rc = kstrtou32(temp, 10, &ex.minor);
+               if (rc)
+                       return -EINVAL;
        } else {
                return -EINVAL;
        }
@@ -452,7 +488,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
                 * an matching exception instead. And be silent about it: we
                 * don't want to break compatibility
                 */
-               if (devcgroup->deny_all == false) {
+               if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                        dev_exception_rm(devcgroup, &ex);
                        return 0;
                }
@@ -463,7 +499,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
                 * an matching exception instead. And be silent about it: we
                 * don't want to break compatibility
                 */
-               if (devcgroup->deny_all == true) {
+               if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
                        dev_exception_rm(devcgroup, &ex);
                        return 0;
                }
@@ -533,10 +569,10 @@ struct cgroup_subsys devices_subsys = {
  *
  * returns 0 on success, -EPERM case the operation is not permitted
  */
-static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
-                                       short type, u32 major, u32 minor,
+static int __devcgroup_check_permission(short type, u32 major, u32 minor,
                                        short access)
 {
+       struct dev_cgroup *dev_cgroup;
        struct dev_exception_item ex;
        int rc;
 
@@ -547,6 +583,7 @@ static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
        ex.access = access;
 
        rcu_read_lock();
+       dev_cgroup = task_devcgroup(current);
        rc = may_access(dev_cgroup, &ex);
        rcu_read_unlock();
 
@@ -558,7 +595,6 @@ static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
 
 int __devcgroup_inode_permission(struct inode *inode, int mask)
 {
-       struct dev_cgroup *dev_cgroup = task_devcgroup(current);
        short type, access = 0;
 
        if (S_ISBLK(inode->i_mode))
@@ -570,13 +606,12 @@ int __devcgroup_inode_permission(struct inode *inode, int mask)
        if (mask & MAY_READ)
                access |= ACC_READ;
 
-       return __devcgroup_check_permission(dev_cgroup, type, imajor(inode),
-                                           iminor(inode), access);
+       return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
+                       access);
 }
 
 int devcgroup_inode_mknod(int mode, dev_t dev)
 {
-       struct dev_cgroup *dev_cgroup = task_devcgroup(current);
        short type;
 
        if (!S_ISBLK(mode) && !S_ISCHR(mode))
@@ -587,7 +622,7 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
        else
                type = DEV_CHAR;
 
-       return __devcgroup_check_permission(dev_cgroup, type, MAJOR(dev),
-                                           MINOR(dev), ACC_MKNOD);
+       return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
+                       ACC_MKNOD);
 
 }
index e0fcff1e833107c8e91e35866e94fd847a300242..f7525392ce84f58e9668187bb3708869d5b7c35d 100644
@@ -162,14 +162,14 @@ void *write_thread_function(void *function_data)
        int index;
        struct write_thread_data *thread_data =
                (struct write_thread_data *)function_data;
-       while (!write_thread_data->stop)
+       while (!thread_data->stop)
                for (index = 0;
                     !thread_data->stop && (index < thread_data->n_fds);
                     ++index)
                        if ((write(thread_data->fds[index], &data, 1) < 1) &&
                                (errno != EAGAIN) &&
                                (errno != EWOULDBLOCK)) {
-                               write_thread_data->status = errno;
+                               thread_data->status = errno;
                                return;
                        }
 }
index cd1b03e80899960147c9fc18777a1f5a35f9e08f..b76edf2f833349c8762c95ec4fc20d36f44c73f0 100644
@@ -35,7 +35,7 @@
 #include <sys/mount.h>
 #include <sys/statfs.h>
 #include "../../include/uapi/linux/magic.h"
-#include "../../include/linux/kernel-page-flags.h"
+#include "../../include/uapi/linux/kernel-page-flags.h"
 
 
 #ifndef MAX_PATH
index af0f22fb1ef71e6dc94c620873bc3241c2883430..aca6edcbbc6fa09113389e87b3e76a55a871da98 100644
@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
        int retval;
        int rc = -1;
        int namesize;
-       int i;
+       unsigned int i;
 
        mode |= S_IFREG;
 
@@ -381,25 +381,28 @@ error:
 
 static char *cpio_replace_env(char *new_location)
 {
-       char expanded[PATH_MAX + 1];
-       char env_var[PATH_MAX + 1];
-       char *start;
-       char *end;
-
-       for (start = NULL; (start = strstr(new_location, "${")); ) {
-               end = strchr(start, '}');
-               if (start < end) {
-                       *env_var = *expanded = '\0';
-                       strncat(env_var, start + 2, end - start - 2);
-                       strncat(expanded, new_location, start - new_location);
-                       strncat(expanded, getenv(env_var), PATH_MAX);
-                       strncat(expanded, end + 1, PATH_MAX);
-                       strncpy(new_location, expanded, PATH_MAX);
-               } else
-                       break;
-       }
-
-       return new_location;
+       char expanded[PATH_MAX + 1];
+       char env_var[PATH_MAX + 1];
+       char *start;
+       char *end;
+
+       for (start = NULL; (start = strstr(new_location, "${")); ) {
+               end = strchr(start, '}');
+               if (start < end) {
+                       *env_var = *expanded = '\0';
+                       strncat(env_var, start + 2, end - start - 2);
+                       strncat(expanded, new_location, start - new_location);
+                       strncat(expanded, getenv(env_var),
+                               PATH_MAX - strlen(expanded));
+                       strncat(expanded, end + 1,
+                               PATH_MAX - strlen(expanded));
+                       strncpy(new_location, expanded, PATH_MAX);
+                       new_location[PATH_MAX] = 0;
+               } else
+                       break;
+       }
+
+       return new_location;
 }
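
Besides the re-indentation, the hunk above fixes the strncat() bounds: the
third argument of strncat() is the number of bytes that may be appended (a
terminating NUL is written after them), so the safe limit is the space left
in the destination rather than the destination's total size, and the
strncpy() is followed by an explicit NUL because strncpy() does not
guarantee one. A minimal stand-alone illustration of the bound (hypothetical
buffer, not taken from the patch):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[16] = "usr";

		/* Append at most "space left minus one" bytes; strncat()
		 * adds the terminating NUL itself. */
		strncat(buf, "/local/share/verylongpath",
			sizeof(buf) - strlen(buf) - 1);

		printf("%s\n", buf);	/* truncated but NUL-terminated */
		return 0;
	}
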