git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge branches 'perf-urgent-for-linus', 'sched-urgent-for-linus', 'timers-urgent-for-linus' and 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 19 Jun 2011 16:00:18 +0000 (09:00 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 19 Jun 2011 16:00:18 +0000 (09:00 -0700)
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tools/perf: Fix static build of perf tool
  tracing: Fix regression in printk_formats file

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  generic-ipi: Fix kexec boot crash by initializing call_single_queue before enabling interrupts

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  clocksource: Make watchdog robust vs. interruption
  timerfd: Fix wakeup of processes when timer is cancelled on clock change

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, MAINTAINERS: Add x86 MCE people
  x86, efi: Do not reserve boot services regions within reserved areas

63 files changed:
Documentation/filesystems/proc.txt
MAINTAINERS
arch/arm/mach-omap1/Makefile
arch/arm/mach-omap1/pm_bus.c
arch/arm/mach-omap2/board-omap3pandora.c
arch/arm/mach-omap2/pm-debug.c
arch/arm/mach-pxa/spitz_pm.c
arch/arm/plat-omap/omap_device.c
arch/x86/include/asm/memblock.h
arch/x86/mm/memblock.c
arch/x86/platform/efi/efi.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/powernow-k8.c
drivers/gpio/gpio-omap.c
drivers/gpu/drm/i915/i915_irq.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/coretemp.c
drivers/hwmon/ibmaem.c
drivers/hwmon/ibmpex.c
drivers/hwmon/s3c-hwmon.c
drivers/pcmcia/pxa2xx_vpac270.c
drivers/spi/spi_bfin5xx.c
fs/exec.c
fs/isofs/inode.c
fs/timerfd.c
fs/xfs/linux-2.6/xfs_file.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/xfs_log.c
include/asm-generic/gpio.h
include/linux/clocksource.h
include/linux/gpio.h
include/linux/interrupt.h
include/linux/kmod.h
include/trace/events/irq.h
kernel/kmod.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/rcutree_trace.c
kernel/softirq.c
kernel/time/clocksource.c
kernel/trace/trace_printk.c
mm/rmap.c
security/keys/request_key.c
sound/firewire/isight.c
sound/pci/emu10k1/emu10k1_main.c
sound/pci/hda/hda_beep.h
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/pci/lola/lola.c
sound/pci/rme9652/hdspm.c
sound/usb/6fire/firmware.c
sound/usb/6fire/pcm.c
tools/perf/Makefile
tools/perf/util/trace-event-parse.c

diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f48178024067fd48fc3454806bdaa366c64dd966..db3b1aba32a3f9c0d80ce0cde2d8b6f1943f4dea 100644
@@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
  TASKLET:          0          0          0        290
    SCHED:      27035      26983      26971      26746
  HRTIMER:          0          0          0          0
+     RCU:       1678       1769       2178       2250
 
 
 1.3 IDE devices in /proc/ide
diff --git a/MAINTAINERS b/MAINTAINERS
index 6c59eb90fdf4267a0629674f6546fb5747cc63ff..b12b8c13f53a7e0b6972ad6ad695f59046408727 100644
@@ -6463,7 +6463,7 @@ M:        Jiri Kosina <jkosina@suse.cz>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
 S:     Maintained
-F:     Documentation/usb/hiddev.txt
+F:     Documentation/hid/hiddev.txt
 F:     drivers/hid/usbhid/
 
 USB ISP116X DRIVER
@@ -7007,6 +7007,13 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:     Maintained
 F:     drivers/platform/x86
 
+X86 MCE INFRASTRUCTURE
+M:     Tony Luck <tony.luck@intel.com>
+M:     Borislav Petkov <bp@amd64.org>
+L:     linux-edac@vger.kernel.org
+S:     Maintained
+F:     arch/x86/kernel/cpu/mcheck/*
+
 XEN HYPERVISOR INTERFACE
 M:     Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index af98117043d214f8e088e8fd1429656410300a14..5b114d1558c83f41fe8acc524b1aa86f2ffa4a54 100644
@@ -4,14 +4,14 @@
 
 # Common support
 obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
-obj-y += clock.o clock_data.o opp_data.o reset.o
+obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
 obj-$(CONFIG_OMAP_32K_TIMER)   += timer32k.o
 
 # Power Management
-obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o
+obj-$(CONFIG_PM) += pm.o sleep.o
 
 # DSP
 obj-$(CONFIG_OMAP_MBOX_FWK)    += mailbox_mach.o
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index fe31d933f0edee78106d52d0c82fb95ef70a3a58..334fb8871bc319348f9edb152e5d611fb539aa6f 100644
@@ -56,9 +56,13 @@ static struct dev_power_domain default_power_domain = {
                USE_PLATFORM_PM_SLEEP_OPS
        },
 };
+#define OMAP1_PWR_DOMAIN (&default_power_domain)
+#else
+#define OMAP1_PWR_DOMAIN NULL
+#endif /* CONFIG_PM_RUNTIME */
 
 static struct pm_clk_notifier_block platform_bus_notifier = {
-       .pwr_domain = &default_power_domain,
+       .pwr_domain = OMAP1_PWR_DOMAIN,
        .con_ids = { "ick", "fck", NULL, },
 };
 
@@ -72,4 +76,4 @@ static int __init omap1_pm_runtime_init(void)
        return 0;
 }
 core_initcall(omap1_pm_runtime_init);
-#endif /* CONFIG_PM_RUNTIME */
+
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 2a0bb4818caef6a99a99e068946f970f8adcee1e..23f71d40883ea1fafbd2fd331d33fd9c581731dd 100644
@@ -84,7 +84,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = {
 
 static struct omap_nand_platform_data pandora_nand_data = {
        .cs             = 0,
-       .devsize        = 1,    /* '0' for 8-bit, '1' for 16-bit device */
+       .devsize        = NAND_BUSWIDTH_16,
+       .xfer_type      = NAND_OMAP_PREFETCH_DMA,
        .parts          = omap3pandora_nand_partitions,
        .nr_parts       = ARRAY_SIZE(omap3pandora_nand_partitions),
 };
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index a5a83b358ddd89724de71eace26e11d8289c8e65..e01da45c053756f62ac08956620dc16dd09d3407 100644
@@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir;
 
 static int pm_dbg_init_done;
 
-static int __init pm_dbg_init(void);
+static int pm_dbg_init(void);
 
 enum {
        DEBUG_FILE_COUNTERS = 0,
@@ -595,7 +595,7 @@ static int option_set(void *data, u64 val)
 
 DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
 
-static int __init pm_dbg_init(void)
+static int pm_dbg_init(void)
 {
        int i;
        struct dentry *d;
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index 7fe74067d85fc7e2aeb1469878f28c8cd73b3456..094279aefe9c6fd5d6a86dd88fbbc44b6d55c07b 100644
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
+#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/apm-emulation.h>
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index a37b8eb65b76a6c2b13a019545852b3c9b48a7cc..49fc0df0c21f58be364d6da00e14d73eb8c32344 100644
@@ -84,6 +84,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/pm_runtime.h>
 
 #include <plat/omap_device.h>
 #include <plat/omap_hwmod.h>
@@ -539,20 +540,34 @@ int omap_early_device_register(struct omap_device *od)
 static int _od_runtime_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
+       int ret;
+
+       ret = pm_generic_runtime_suspend(dev);
+
+       if (!ret)
+               omap_device_idle(pdev);
+
+       return ret;
+}
 
-       return omap_device_idle(pdev);
+static int _od_runtime_idle(struct device *dev)
+{
+       return pm_generic_runtime_idle(dev);
 }
 
 static int _od_runtime_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
 
-       return omap_device_enable(pdev);
+       omap_device_enable(pdev);
+
+       return pm_generic_runtime_resume(dev);
 }
 
 static struct dev_power_domain omap_device_power_domain = {
        .ops = {
                .runtime_suspend = _od_runtime_suspend,
+               .runtime_idle = _od_runtime_idle,
                .runtime_resume = _od_runtime_resume,
                USE_PLATFORM_PM_SLEEP_OPS
        }
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14ba69780c38defd3fbcfe7fdcb05859f9c0..0cd3800f33b9dcf68e39a1288ebf7ccd442f3a97 100644
@@ -4,7 +4,6 @@
 #define ARCH_DISCARD_MEMBLOCK
 
 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
 
 #endif
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa1169392b83eb44350ec5da13cd23bfc3477986..992da5ec5a64d69ddc3381d9e8508e4d6061d4ef 100644
@@ -8,7 +8,7 @@
 #include <linux/range.h>
 
 /* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
        struct memblock_region *r;
        u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
                if (addr >= ei_last)
                        continue;
                *sizep = ei_last - addr;
-               while (check_with_memblock_reserved_size(&addr, sizep, align))
+               while (memblock_x86_check_reserved_size(&addr, sizep, align))
                        ;
 
                if (*sizep)
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa34560018c55a2619251bcb0db6546c4d1..474356b98ede32e647d4343c32a7e7893fe4beff 100644
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
 
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                efi_memory_desc_t *md = p;
-               unsigned long long start = md->phys_addr;
-               unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+               u64 start = md->phys_addr;
+               u64 size = md->num_pages << EFI_PAGE_SHIFT;
 
                if (md->type != EFI_BOOT_SERVICES_CODE &&
                    md->type != EFI_BOOT_SERVICES_DATA)
                        continue;
-
-               memblock_x86_reserve_range(start, start + size, "EFI Boot");
+               /* Only reserve where possible:
+                * - Not within any already allocated areas
+                * - Not over any memory area (really needed, if above?)
+                * - Not within any part of the kernel
+                * - Not the bios reserved area
+               */
+               if ((start+size >= virt_to_phys(_text)
+                               && start <= virt_to_phys(_end)) ||
+                       !e820_all_mapped(start, start+size, E820_RAM) ||
+                       memblock_x86_check_reserved_size(&start, &size,
+                                                       1<<EFI_PAGE_SHIFT)) {
+                       /* Could not reserve, skip it */
+                       md->num_pages = 0;
+                       memblock_dbg(PFX "Could not reserve boot range "
+                                       "[0x%010llx-0x%010llx]\n",
+                                               start, start+size-1);
+               } else
+                       memblock_x86_reserve_range(start, start+size,
+                                                       "EFI Boot");
        }
 }
 
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
                    md->type != EFI_BOOT_SERVICES_DATA)
                        continue;
 
+               /* Could not reserve boot area */
+               if (!size)
+                       continue;
+
                free_bootmem_late(start, size);
        }
 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 853f92d23ddb5ed80c8b2836e878422121ae3f5f..faf7c521784874c0dbbce7a65b9b05d3ea379f2f 100644
@@ -298,11 +298,13 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
        old_index = stat->last_index;
        new_index = freq_table_get_index(stat, freq->new);
 
-       cpufreq_stats_update(freq->cpu);
-       if (old_index == new_index)
+       /* We can't do stat->time_in_state[-1]= .. */
+       if (old_index == -1 || new_index == -1)
                return 0;
 
-       if (old_index == -1 || new_index == -1)
+       cpufreq_stats_update(freq->cpu);
+
+       if (old_index == new_index)
                return 0;
 
        spin_lock(&cpufreq_stats_lock);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 83479b6fb9a14fc9ef9826c18274560951c45f19..bce576d7478ed41f9b69ac727cc5d143d850bb83 100644
@@ -1079,6 +1079,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
        }
 
        res = transition_fid_vid(data, fid, vid);
+       if (res)
+               return res;
+
        freqs.new = find_khz_freq_from_fid(data->currfid);
 
        for_each_cpu(i, data->available_cores) {
@@ -1101,7 +1104,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
        /* get MSR index for hardware pstate transition */
        pstate = index & HW_PSTATE_MASK;
        if (pstate > data->max_hw_pstate)
-               return 0;
+               return -EINVAL;
+
        freqs.old = find_khz_freq_from_pstate(data->powernow_table,
                        data->currpstate);
        freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 01f74a8459d99e6b12a7d5bebe5820f17769a62d..35bebde23e835c7674aaaa5e6bbf0e998ab0fd32 100644
@@ -469,8 +469,9 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
                                        + OMAP24XX_GPIO_CLEARWKUENA);
                }
        }
-       /* This part needs to be executed always for OMAP34xx */
-       if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
+       /* This part needs to be executed always for OMAP{34xx, 44xx} */
+       if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
+                       (bank->non_wakeup_gpios & gpio_bit)) {
                /*
                 * Log the edge gpio and manually trigger the IRQ
                 * after resume if the input level changes
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b9fafe3b045bda5817047f2a5cde8a6fc1b58265..9e34a1abeb61b8d43d49553d5fb2299a4429a026 100644
@@ -1740,6 +1740,16 @@ void ironlake_irq_preinstall(struct drm_device *dev)
                INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
 
        I915_WRITE(HWSTAM, 0xeffe);
+       if (IS_GEN6(dev)) {
+               /* Workaround stalls observed on Sandy Bridge GPUs by
+                * making the blitter command streamer generate a
+                * write to the Hardware Status Page for
+                * MI_USER_INTERRUPT.  This appears to serialize the
+                * previous seqno write out before the interrupt
+                * happens.
+                */
+               I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+       }
 
        /* XXX hotplug from PCH */
 
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 67d2a7585934c31b3607710c675e531acb21e6a6..36ca465c00cefac036a22fad61fc70b2568a1ccc 100644
@@ -305,6 +305,7 @@ config HID_MULTITOUCH
          - 3M PCT touch screens
          - ActionStar dual touch panels
          - Cando dual touch panels
+         - Chunghwa panels
          - CVTouch panels
          - Cypress TrueTouch panels
          - Elo TouchSystems IntelliTouch Plus panels
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c957c4b4fe703368a4d19e739277b5c9bd41383b..f7440e8ce3e77e299796e131bfcf4684ee36de44 100644
@@ -1359,6 +1359,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0b374a6d6db007853f6762ef489e9191a5e1258e..aecb5a4b8d6d91e80fe4eb6398bf5992ea6a7f9d 100644
 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH      0xb19d
 #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
 
+#define USB_VENDOR_ID_CHUNGHWAT                0x2247
+#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH     0x0001
+
 #define USB_VENDOR_ID_CIDC             0x1677
 
 #define USB_VENDOR_ID_CMEDIA           0x0d8c
 #define USB_VENDOR_ID_UCLOGIC          0x5543
 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209    0x0042
 #define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5      0x6001
+#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60     0x0064
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U   0x0003
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U   0x0004
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U   0x0005
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index a5eda4c8127a274ee20343d26a82383135c3048b..0ec91c18a4216a52a4a3c7d85292a22f61a6e828 100644
@@ -501,17 +501,9 @@ static int magicmouse_probe(struct hid_device *hdev,
        }
        report->size = 6;
 
-       /*
-        * The device reponds with 'invalid report id' when feature
-        * report switching it into multitouch mode is sent to it.
-        *
-        * This results in -EIO from the _raw low-level transport callback,
-        * but there seems to be no other way of switching the mode.
-        * Thus the super-ugly hacky success check below.
-        */
        ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
                        HID_FEATURE_REPORT);
-       if (ret != -EIO) {
+       if (ret != sizeof(feature)) {
                hid_err(hdev, "unable to request touch data (%d)\n", ret);
                goto err_stop_hw;
        }
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index ecd4d2db9e800ca2397c3ad18945d30b2080a028..0b2dcd0ee591f29fa8d1b6e14344c4ed2743cc9a 100644
@@ -64,6 +64,7 @@ struct mt_device {
        struct mt_class *mtclass;       /* our mt device class */
        unsigned last_field_index;      /* last field index of the report */
        unsigned last_slot_field;       /* the last field of a slot */
+       int last_mt_collection; /* last known mt-related collection */
        __s8 inputmode;         /* InputMode HID feature, -1 if non-existent */
        __u8 num_received;      /* how many contacts we received */
        __u8 num_expected;      /* expected last contact index */
@@ -225,8 +226,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_move);
                        /* touchscreen emulation */
                        set_abs(hi->input, ABS_X, field, cls->sn_move);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_GD_Y:
                        if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -237,8 +240,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_move);
                        /* touchscreen emulation */
                        set_abs(hi->input, ABS_Y, field, cls->sn_move);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                }
                return 0;
@@ -246,31 +251,40 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        case HID_UP_DIGITIZER:
                switch (usage->hid) {
                case HID_DG_INRANGE:
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_CONFIDENCE:
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_TIPSWITCH:
                        hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
                        input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_CONTACTID:
                        input_mt_init_slots(hi->input, td->maxcontacts);
                        td->last_slot_field = usage->hid;
                        td->last_field_index = field->index;
+                       td->last_mt_collection = usage->collection_index;
                        return 1;
                case HID_DG_WIDTH:
                        hid_map_usage(hi, usage, bit, max,
                                        EV_ABS, ABS_MT_TOUCH_MAJOR);
                        set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field,
                                cls->sn_width);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_HEIGHT:
                        hid_map_usage(hi, usage, bit, max,
@@ -279,8 +293,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_height);
                        input_set_abs_params(hi->input,
                                        ABS_MT_ORIENTATION, 0, 1, 0, 0);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_TIPPRESSURE:
                        if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -292,16 +308,20 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        /* touchscreen emulation */
                        set_abs(hi->input, ABS_PRESSURE, field,
                                cls->sn_pressure);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_CONTACTCOUNT:
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index)
+                               td->last_field_index = field->index;
                        return 1;
                case HID_DG_CONTACTMAX:
                        /* we don't set td->last_slot_field as contactcount and
                         * contact max are global to the report */
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index)
+                               td->last_field_index = field->index;
                        return -1;
                }
                /* let hid-input decide for the others */
@@ -516,6 +536,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        }
        td->mtclass = mtclass;
        td->inputmode = -1;
+       td->last_mt_collection = -1;
        hid_set_drvdata(hdev, td);
 
        ret = hid_parse(hdev);
@@ -593,6 +614,11 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
                        USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
 
+       /* Chunghwa Telecom touch panels */
+       {  .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
+                       USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+
        /* CVTouch panels */
        { .driver_data = MT_CLS_DEFAULT,
                HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0e30b140edca173d3fb47b4d2b756ad7c53f9f43..621959d5cc42c6b6798328fe32ce32072fa8669a 100644
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index ff3c644888b1ba8b0bf14e2426313b66db3688c0..7c1188b53c3ec0ea4aca4364239d9e3596cb7cfa 100644
@@ -248,12 +248,15 @@ static int hiddev_release(struct inode * inode, struct file * file)
                        usbhid_close(list->hiddev->hid);
                        usbhid_put_power(list->hiddev->hid);
                } else {
+                       mutex_unlock(&list->hiddev->existancelock);
                        kfree(list->hiddev);
+                       kfree(list);
+                       return 0;
                }
        }
 
-       kfree(list);
        mutex_unlock(&list->hiddev->existancelock);
+       kfree(list);
 
        return 0;
 }
@@ -923,10 +926,11 @@ void hiddev_disconnect(struct hid_device *hid)
        usb_deregister_dev(usbhid->intf, &hiddev_class);
 
        if (hiddev->open) {
+               mutex_unlock(&hiddev->existancelock);
                usbhid_close(hiddev->hid);
                wake_up_interruptible(&hiddev->wait);
        } else {
+               mutex_unlock(&hiddev->existancelock);
                kfree(hiddev);
        }
-       mutex_unlock(&hiddev->existancelock);
 }
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index b5e892017e0c57497f7726d59812a56144f35ea8..dcb78a7a804754956035f2c57e2e1f8673ac45df 100644
@@ -268,6 +268,7 @@ static struct device_attribute atk_name_attr =
 static void atk_init_attribute(struct device_attribute *attr, char *name,
                sysfs_show_func show)
 {
+       sysfs_attr_init(&attr->attr);
        attr->attr.name = name;
        attr->attr.mode = 0444;
        attr->show = show;
@@ -1188,19 +1189,15 @@ static int atk_create_files(struct atk_data *data)
        int err;
 
        list_for_each_entry(s, &data->sensor_list, list) {
-               sysfs_attr_init(&s->input_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->input_attr);
                if (err)
                        return err;
-               sysfs_attr_init(&s->label_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->label_attr);
                if (err)
                        return err;
-               sysfs_attr_init(&s->limit1_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->limit1_attr);
                if (err)
                        return err;
-               sysfs_attr_init(&s->limit2_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->limit2_attr);
                if (err)
                        return err;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 85e937984ff7594a8a610db84c08871898c67f15..0070d5476dd0b5ee96bba29259c821d5419bf117 100644
@@ -97,9 +97,7 @@ struct platform_data {
 struct pdev_entry {
        struct list_head list;
        struct platform_device *pdev;
-       unsigned int cpu;
        u16 phys_proc_id;
-       u16 cpu_core_id;
 };
 
 static LIST_HEAD(pdev_list);
@@ -653,9 +651,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
        }
 
        pdev_entry->pdev = pdev;
-       pdev_entry->cpu = cpu;
        pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
-       pdev_entry->cpu_core_id = TO_CORE_ID(cpu);
 
        list_add_tail(&pdev_entry->list, &pdev_list);
        mutex_unlock(&pdev_list_mutex);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 537409d07ee730b55e358fcb1e9e6d515d27e1e8..1a409c5bc9bce687922ce5389150599c6e928f39 100644
@@ -947,6 +947,7 @@ static int aem_register_sensors(struct aem_data *data,
 
        /* Set up read-only sensors */
        while (ro->label) {
+               sysfs_attr_init(&sensors->dev_attr.attr);
                sensors->dev_attr.attr.name = ro->label;
                sensors->dev_attr.attr.mode = S_IRUGO;
                sensors->dev_attr.show = ro->show;
@@ -963,6 +964,7 @@ static int aem_register_sensors(struct aem_data *data,
 
        /* Set up read-write sensors */
        while (rw->label) {
+               sysfs_attr_init(&sensors->dev_attr.attr);
                sensors->dev_attr.attr.name = rw->label;
                sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
                sensors->dev_attr.show = rw->show;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 06d4eafcf76b231fdc74e0418341e496387b78d9..41dbf8161ed7b4fb1bc29811c61f006152c9a41c 100644
@@ -358,6 +358,7 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type,
        else if (type == POWER_SENSOR)
                sprintf(n, power_sensor_name_templates[func], "power", counter);
 
+       sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr);
        data->sensors[sensor].attr[func].dev_attr.attr.name = n;
        data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO;
        data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor;
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 92b42db43bcfd9e5706c41bfb9049a26fd967b93..b39f52e2752a7bca54a1bb25c7083382b01aa52e 100644
@@ -232,6 +232,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
 
        attr = &attrs->in;
        attr->index = channel;
+       sysfs_attr_init(&attr->dev_attr.attr);
        attr->dev_attr.attr.name  = attrs->in_name;
        attr->dev_attr.attr.mode  = S_IRUGO;
        attr->dev_attr.show = s3c_hwmon_ch_show;
@@ -249,6 +250,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
 
                attr = &attrs->label;
                attr->index = channel;
+               sysfs_attr_init(&attr->dev_attr.attr);
                attr->dev_attr.attr.name  = attrs->label_name;
                attr->dev_attr.attr.mode  = S_IRUGO;
                attr->dev_attr.show = s3c_hwmon_label_show;
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 435002dfc3caef7a418944e9e967af8a27c6b822..712baab3c83d58147237384fb168a4a6c3ceba1c 100644
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/gpio.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index f706dba165cf6812fc364271d3086424ce12aa36..cc880c95e7de7dfee978319ebfb7aecd74a4f355 100644
@@ -681,13 +681,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
        drv_data->cs_change = transfer->cs_change;
 
        /* Bits per word setup */
-       bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
-       if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) {
+       bits_per_word = transfer->bits_per_word ? :
+               message->spi->bits_per_word ? : 8;
+       if (bits_per_word % 16 == 0) {
                drv_data->n_bytes = bits_per_word/8;
                drv_data->len = (transfer->len) >> 1;
                cr_width = BIT_CTL_WORDSIZE;
                drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
-       } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) {
+       } else if (bits_per_word % 8 == 0) {
                drv_data->n_bytes = bits_per_word/8;
                drv_data->len = transfer->len;
                cr_width = 0;
diff --git a/fs/exec.c b/fs/exec.c
index 97e0d52d72fdd65a365f3cab7027ae22c230582e..6075a1e727aee13dd3cd492b61d55edd81ee258e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1996,7 +1996,7 @@ static void wait_for_dump_helpers(struct file *file)
  * is a special value that we use to trap recursive
  * core dumps
  */
-static int umh_pipe_setup(struct subprocess_info *info)
+static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
 {
        struct file *rp, *wp;
        struct fdtable *fdt;
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 3db5ba4568fc8efd30025a9e9906eb01a47f9c45..b3cc8586984e9748ff3c66e6c8703fa84c822c1c 100644
@@ -974,7 +974,7 @@ out_no_inode:
 out_no_read:
        printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n",
                __func__, s->s_id, iso_blknum, block);
-       goto out_freesbi;
+       goto out_freebh;
 out_bad_zone_size:
        printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n",
                sbi->s_log_zone_size);
@@ -989,6 +989,7 @@ out_unknown_format:
 
 out_freebh:
        brelse(bh);
+       brelse(pri_bh);
 out_freesbi:
        kfree(opt.iocharset);
        kfree(sbi);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index f67acbdda5e8c13fce54e51f72fe0882f2fa94da..dffeb3795af1d4204f8554447dbb2d2c33992429 100644
@@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
 
 /*
  * Called when the clock was set to cancel the timers in the cancel
- * list.
+ * list. This will wake up processes waiting on these timers. The
+ * wake-up requires ctx->ticks to be non zero, therefore we increment
+ * it before calling wake_up_locked().
  */
 void timerfd_clock_was_set(void)
 {
@@ -76,6 +78,7 @@ void timerfd_clock_was_set(void)
                spin_lock_irqsave(&ctx->wqh.lock, flags);
                if (ctx->moffs.tv64 != moffs.tv64) {
                        ctx->moffs.tv64 = KTIME_MAX;
+                       ctx->ticks++;
                        wake_up_locked(&ctx->wqh);
                }
                spin_unlock_irqrestore(&ctx->wqh.lock, flags);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index f4213ba1ff853dad53d16d27b6cd713f01784ea7..7f782af286bfa0edd73a125cdcb892339025913a 100644
@@ -131,19 +131,34 @@ xfs_file_fsync(
 {
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error = 0;
        int                     log_flushed = 0;
 
        trace_xfs_file_fsync(ip);
 
-       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+       if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);
 
        xfs_iflags_clear(ip, XFS_ITRUNCATED);
 
        xfs_ioend_wait(ip);
 
+       if (mp->m_flags & XFS_MOUNT_BARRIER) {
+               /*
+                * If we have an RT and/or log subvolume we need to make sure
+                * to flush the write cache the device used for file data
+                * first.  This is to ensure newly written file data make
+                * it to disk before logging the new inode size in case of
+                * an extending write.
+                */
+               if (XFS_IS_REALTIME_INODE(ip))
+                       xfs_blkdev_issue_flush(mp->m_rtdev_targp);
+               else if (mp->m_logdev_targp != mp->m_ddev_targp)
+                       xfs_blkdev_issue_flush(mp->m_ddev_targp);
+       }
+
        /*
         * We always need to make sure that the required inode state is safe on
         * disk.  The inode might be clean but we still might need to force the
@@ -175,9 +190,9 @@ xfs_file_fsync(
                 * updates.  The sync transaction will also force the log.
                 */
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
-               tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
+               tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
                error = xfs_trans_reserve(tp, 0,
-                               XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
+                               XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
                if (error) {
                        xfs_trans_cancel(tp, 0);
                        return -error;
@@ -209,28 +224,25 @@ xfs_file_fsync(
                 * force the log.
                 */
                if (xfs_ipincount(ip)) {
-                       error = _xfs_log_force_lsn(ip->i_mount,
+                       error = _xfs_log_force_lsn(mp,
                                        ip->i_itemp->ili_last_lsn,
                                        XFS_LOG_SYNC, &log_flushed);
                }
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
        }
 
-       if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
-               /*
-                * If the log write didn't issue an ordered tag we need
-                * to flush the disk cache for the data device now.
-                */
-               if (!log_flushed)
-                       xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);
-
-               /*
-                * If this inode is on the RT dev we need to flush that
-                * cache as well.
-                */
-               if (XFS_IS_REALTIME_INODE(ip))
-                       xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
-       }
+       /*
+        * If we only have a single device, and the log force about was
+        * a no-op we might have to flush the data device cache here.
+        * This can only happen for fdatasync/O_DSYNC if we were overwriting
+        * an already allocated file and thus do not have any metadata to
+        * commit.
+        */
+       if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
+           mp->m_logdev_targp == mp->m_ddev_targp &&
+           !XFS_IS_REALTIME_INODE(ip) &&
+           !log_flushed)
+               xfs_blkdev_issue_flush(mp->m_ddev_targp);
 
        return -error;
 }
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index dd21784525a8096ef76f6d6cd14c80a319918839..d44d92cd12b17c7645156b4754c39ea29b5b10e5 100644
@@ -182,7 +182,7 @@ xfs_vn_mknod(
        if (IS_POSIXACL(dir)) {
                default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
                if (IS_ERR(default_acl))
-                       return -PTR_ERR(default_acl);
+                       return PTR_ERR(default_acl);
 
                if (!default_acl)
                        mode &= ~current_umask();
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1e3a7ce804dce2feb1956036eec54ac7ed27da36..a1a881e68a9aa86a1aa76c27931e02202a57a08d 100644
@@ -627,68 +627,6 @@ xfs_blkdev_put(
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
-/*
- * Try to write out the superblock using barriers.
- */
-STATIC int
-xfs_barrier_test(
-       xfs_mount_t     *mp)
-{
-       xfs_buf_t       *sbp = xfs_getsb(mp, 0);
-       int             error;
-
-       XFS_BUF_UNDONE(sbp);
-       XFS_BUF_UNREAD(sbp);
-       XFS_BUF_UNDELAYWRITE(sbp);
-       XFS_BUF_WRITE(sbp);
-       XFS_BUF_UNASYNC(sbp);
-       XFS_BUF_ORDERED(sbp);
-
-       xfsbdstrat(mp, sbp);
-       error = xfs_buf_iowait(sbp);
-
-       /*
-        * Clear all the flags we set and possible error state in the
-        * buffer.  We only did the write to try out whether barriers
-        * worked and shouldn't leave any traces in the superblock
-        * buffer.
-        */
-       XFS_BUF_DONE(sbp);
-       XFS_BUF_ERROR(sbp, 0);
-       XFS_BUF_UNORDERED(sbp);
-
-       xfs_buf_relse(sbp);
-       return error;
-}
-
-STATIC void
-xfs_mountfs_check_barriers(xfs_mount_t *mp)
-{
-       int error;
-
-       if (mp->m_logdev_targp != mp->m_ddev_targp) {
-               xfs_notice(mp,
-                 "Disabling barriers, not supported with external log device");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-
-       if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
-               xfs_notice(mp,
-                       "Disabling barriers, underlying device is readonly");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-
-       error = xfs_barrier_test(mp);
-       if (error) {
-               xfs_notice(mp,
-                       "Disabling barriers, trial barrier write failed");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-}
-
 void
 xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
@@ -1240,14 +1178,6 @@ xfs_fs_remount(
                switch (token) {
                case Opt_barrier:
                        mp->m_flags |= XFS_MOUNT_BARRIER;
-
-                       /*
-                        * Test if barriers are actually working if we can,
-                        * else delay this check until the filesystem is
-                        * marked writeable.
-                        */
-                       if (!(mp->m_flags & XFS_MOUNT_RDONLY))
-                               xfs_mountfs_check_barriers(mp);
                        break;
                case Opt_nobarrier:
                        mp->m_flags &= ~XFS_MOUNT_BARRIER;
@@ -1282,8 +1212,6 @@ xfs_fs_remount(
        /* ro -> rw */
        if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
                mp->m_flags &= ~XFS_MOUNT_RDONLY;
-               if (mp->m_flags & XFS_MOUNT_BARRIER)
-                       xfs_mountfs_check_barriers(mp);
 
                /*
                 * If this is the first remount to writeable state we
@@ -1465,9 +1393,6 @@ xfs_fs_fill_super(
        if (error)
                goto out_free_sb;
 
-       if (mp->m_flags & XFS_MOUNT_BARRIER)
-               xfs_mountfs_check_barriers(mp);
-
        error = xfs_filestream_mount(mp);
        if (error)
                goto out_free_sb;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 211930246f2073f4759a569936b79ab387003e21..41d5b8f2bf92d3fd3fae9773f667a0bf1cb42381 100644
@@ -1372,8 +1372,17 @@ xlog_sync(xlog_t         *log,
        XFS_BUF_ASYNC(bp);
        bp->b_flags |= XBF_LOG_BUFFER;
 
-       if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
+       if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
+               /*
+                * If we have an external log device, flush the data device
+                * before flushing the log to make sure all meta data
+                * written back from the AIL actually made it to disk
+                * before writing out the new log tail LSN in the log buffer.
+                */
+               if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
+                       xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
                XFS_BUF_ORDERED(bp);
+       }
 
        ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
        ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index fcdcb5d5c99539871c448411dbdb0fddd2be56b8..d494001b12260a2f56b9337e01226f17dcee3634 100644
@@ -170,16 +170,6 @@ extern int __gpio_cansleep(unsigned gpio);
 
 extern int __gpio_to_irq(unsigned gpio);
 
-#define GPIOF_DIR_OUT  (0 << 0)
-#define GPIOF_DIR_IN   (1 << 0)
-
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH        (1 << 1)
-
-#define GPIOF_IN               (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW     (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH    (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
-
 /**
  * struct gpio - a structure describing a GPIO with configuration
  * @gpio:      the GPIO number
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d4646b48dc4a7b0074dc7491e7f66186f398e70e..18a1baf31f2d531493526a48132dc38a5f2f0f03 100644
@@ -188,6 +188,7 @@ struct clocksource {
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
        /* Watchdog related data, used by the framework */
        struct list_head wd_list;
+       cycle_t cs_last;
        cycle_t wd_last;
 #endif
 } ____cacheline_aligned;
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 32d47e710661e55879b5473f9850316948f484fe..17b5a0d80e4239cc10fceb670be29dbf4e49a0f0 100644
@@ -3,6 +3,17 @@
 
 /* see Documentation/gpio.txt */
 
+/* make these flag values available regardless of GPIO kconfig options */
+#define GPIOF_DIR_OUT  (0 << 0)
+#define GPIOF_DIR_IN   (1 << 0)
+
+#define GPIOF_INIT_LOW (0 << 1)
+#define GPIOF_INIT_HIGH        (1 << 1)
+
+#define GPIOF_IN               (GPIOF_DIR_IN)
+#define GPIOF_OUT_INIT_LOW     (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
+#define GPIOF_OUT_INIT_HIGH    (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+
 #ifdef CONFIG_GENERIC_GPIO
 #include <asm/gpio.h>
 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 6c12989839d9093f970b9f90d8dffe44dec6f7a2..f6efed0039edfdb06cbe1430588e51bf2db07aca 100644
@@ -414,6 +414,7 @@ enum
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
        HRTIMER_SOFTIRQ,
+       RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */
 
        NR_SOFTIRQS
 };
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index d4a5c84c503d7307a577474bc0eb4d75cb7265f3..0da38cf7db7bddc8841d14620a9e831866afcfd2 100644
@@ -45,7 +45,7 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
 #endif
 
 
-struct key;
+struct cred;
 struct file;
 
 enum umh_wait {
@@ -62,7 +62,7 @@ struct subprocess_info {
        char **envp;
        enum umh_wait wait;
        int retval;
-       int (*init)(struct subprocess_info *info);
+       int (*init)(struct subprocess_info *info, struct cred *new);
        void (*cleanup)(struct subprocess_info *info);
        void *data;
 };
@@ -73,7 +73,7 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
 
 /* Set various pieces of state into the subprocess_info structure */
 void call_usermodehelper_setfns(struct subprocess_info *info,
-                   int (*init)(struct subprocess_info *info),
+                   int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
                    void *data);
 
@@ -87,7 +87,7 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info);
 static inline int
 call_usermodehelper_fns(char *path, char **argv, char **envp,
                        enum umh_wait wait,
-                       int (*init)(struct subprocess_info *info),
+                       int (*init)(struct subprocess_info *info, struct cred *new),
                        void (*cleanup)(struct subprocess_info *), void *data)
 {
        struct subprocess_info *info;
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index ae045ca7d356033d49c8a9650a618c206503f68c..1c09820df58564f8d0430d996998edc6f8d893c0 100644
@@ -20,7 +20,8 @@ struct softirq_action;
                         softirq_name(BLOCK_IOPOLL),    \
                         softirq_name(TASKLET),         \
                         softirq_name(SCHED),           \
-                        softirq_name(HRTIMER))
+                        softirq_name(HRTIMER),         \
+                        softirq_name(RCU))
 
 /**
  * irq_handler_entry - called immediately before the irq action handler
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ad6a81c58b44e2d5ae57c5dbe866da1cf1008a03..47613dfb7b28c340825493ed36a4c3347a39290d 100644
@@ -156,12 +156,6 @@ static int ____call_usermodehelper(void *data)
         */
        set_user_nice(current, 0);
 
-       if (sub_info->init) {
-               retval = sub_info->init(sub_info);
-               if (retval)
-                       goto fail;
-       }
-
        retval = -ENOMEM;
        new = prepare_kernel_cred(current);
        if (!new)
@@ -173,6 +167,14 @@ static int ____call_usermodehelper(void *data)
                                             new->cap_inheritable);
        spin_unlock(&umh_sysctl_lock);
 
+       if (sub_info->init) {
+               retval = sub_info->init(sub_info, new);
+               if (retval) {
+                       abort_creds(new);
+                       goto fail;
+               }
+       }
+
        commit_creds(new);
 
        retval = kernel_execve(sub_info->path,
@@ -388,7 +390,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
  * context in which call_usermodehelper_exec is called.
  */
 void call_usermodehelper_setfns(struct subprocess_info *info,
-                   int (*init)(struct subprocess_info *info),
+                   int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
                    void *data)
 {
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff92e996c1e52fade38475ba14604a8f7bd..7e59ffb3d0ba487c0474270a476b25cbc96d2ac1 100644
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+#ifdef CONFIG_RCU_BOOST
+
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
-static void invoke_rcu_cpu_kthread(void);
+static void invoke_rcu_core(void);
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
 #define RCU_KTHREAD_PRIO 1     /* RT priority for per-CPU kthreads. */
 
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
        int need_report = 0;
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp;
-       struct task_struct *t;
 
-       /* Stop the CPU's kthread. */
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t != NULL) {
-               per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-               kthread_stop(t);
-       }
+       rcu_stop_cpu_kthread(cpu);
 
        /* Exclude any attempts to start a new grace period. */
        raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
        /* Re-raise the RCU softirq if there are callbacks remaining. */
        if (cpu_has_callbacks_ready_to_invoke(rdp))
-               invoke_rcu_cpu_kthread();
+               invoke_rcu_core();
 }
 
 /*
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
        }
        rcu_preempt_check_callbacks(cpu);
        if (rcu_pending(cpu))
-               invoke_rcu_cpu_kthread();
+               invoke_rcu_core();
 }
 
 #ifdef CONFIG_SMP
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 
        /* If there are callbacks ready, invoke them. */
-       rcu_do_batch(rsp, rdp);
+       if (cpu_has_callbacks_ready_to_invoke(rdp))
+               invoke_rcu_callbacks(rsp, rdp);
 }
 
 /*
  * Do softirq processing for the current CPU.
  */
-static void rcu_process_callbacks(void)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
        __rcu_process_callbacks(&rcu_sched_state,
                                &__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
  * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
  * cannot disappear out from under us.
  */
-static void invoke_rcu_cpu_kthread(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __this_cpu_write(rcu_cpu_has_work, 1);
-       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-               local_irq_restore(flags);
-               return;
-       }
-       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
-       local_irq_restore(flags);
-}
-
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-       struct task_struct *t;
-
-       t = rnp->node_kthread_task;
-       if (t != NULL)
-               wake_up_process(t);
-}
-
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument.  The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-       int policy;
-       struct sched_param sp;
-       struct task_struct *t;
-
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t == NULL)
-               return;
-       if (to_rt) {
-               policy = SCHED_FIFO;
-               sp.sched_priority = RCU_KTHREAD_PRIO;
-       } else {
-               policy = SCHED_NORMAL;
-               sp.sched_priority = 0;
-       }
-       sched_setscheduler_nocheck(t, policy, &sp);
-}
-
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-       struct rcu_node *rnp = rdp->mynode;
-
-       atomic_or(rdp->grpmask, &rnp->wakemask);
-       invoke_rcu_node_kthread(rnp);
-}
-
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted.  Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
-{
-       struct sched_param sp;
-       struct timer_list yield_timer;
-
-       setup_timer_on_stack(&yield_timer, f, arg);
-       mod_timer(&yield_timer, jiffies + 2);
-       sp.sched_priority = 0;
-       sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-       set_user_nice(current, 19);
-       schedule();
-       sp.sched_priority = RCU_KTHREAD_PRIO;
-       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-       del_timer(&yield_timer);
-}
-
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline.  We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh.  This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
-{
-       while (cpu_is_offline(cpu) ||
-              !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-              smp_processor_id() != cpu) {
-               if (kthread_should_stop())
-                       return 1;
-               per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-               per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-               local_bh_enable();
-               schedule_timeout_uninterruptible(1);
-               if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-                       set_cpus_allowed_ptr(current, cpumask_of(cpu));
-               local_bh_disable();
-       }
-       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-       return 0;
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * earlier RCU softirq.
- */
-static int rcu_cpu_kthread(void *arg)
-{
-       int cpu = (int)(long)arg;
-       unsigned long flags;
-       int spincnt = 0;
-       unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-       char work;
-       char *workp = &per_cpu(rcu_cpu_has_work, cpu);
-
-       for (;;) {
-               *statusp = RCU_KTHREAD_WAITING;
-               rcu_wait(*workp != 0 || kthread_should_stop());
-               local_bh_disable();
-               if (rcu_cpu_kthread_should_stop(cpu)) {
-                       local_bh_enable();
-                       break;
-               }
-               *statusp = RCU_KTHREAD_RUNNING;
-               per_cpu(rcu_cpu_kthread_loops, cpu)++;
-               local_irq_save(flags);
-               work = *workp;
-               *workp = 0;
-               local_irq_restore(flags);
-               if (work)
-                       rcu_process_callbacks();
-               local_bh_enable();
-               if (*workp != 0)
-                       spincnt++;
-               else
-                       spincnt = 0;
-               if (spincnt > 10) {
-                       *statusp = RCU_KTHREAD_YIELDING;
-                       rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
-                       spincnt = 0;
-               }
-       }
-       *statusp = RCU_KTHREAD_STOPPED;
-       return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task.  There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       struct sched_param sp;
-       struct task_struct *t;
-
-       if (!rcu_kthreads_spawnable ||
-           per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-               return 0;
-       t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
-       if (IS_ERR(t))
-               return PTR_ERR(t);
-       kthread_bind(t, cpu);
-       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-       WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-       per_cpu(rcu_cpu_kthread_task, cpu) = t;
-       sp.sched_priority = RCU_KTHREAD_PRIO;
-       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-       return 0;
-}
-
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed.  We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
-       int cpu;
-       unsigned long flags;
-       unsigned long mask;
-       struct rcu_node *rnp = (struct rcu_node *)arg;
-       struct sched_param sp;
-       struct task_struct *t;
-
-       for (;;) {
-               rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-               rcu_wait(atomic_read(&rnp->wakemask) != 0);
-               rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               mask = atomic_xchg(&rnp->wakemask, 0);
-               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-                       if ((mask & 0x1) == 0)
-                               continue;
-                       preempt_disable();
-                       t = per_cpu(rcu_cpu_kthread_task, cpu);
-                       if (!cpu_online(cpu) || t == NULL) {
-                               preempt_enable();
-                               continue;
-                       }
-                       per_cpu(rcu_cpu_has_work, cpu) = 1;
-                       sp.sched_priority = RCU_KTHREAD_PRIO;
-                       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-                       preempt_enable();
-               }
-       }
-       /* NOTREACHED */
-       rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-       return 0;
-}
-
-/*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question.  The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU.  If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
-{
-       cpumask_var_t cm;
-       int cpu;
-       unsigned long mask = rnp->qsmaskinit;
-
-       if (rnp->node_kthread_task == NULL)
-               return;
-       if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+       if (likely(!rsp->boost)) {
+               rcu_do_batch(rsp, rdp);
                return;
-       cpumask_clear(cm);
-       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
-               if ((mask & 0x1) && cpu != outgoingcpu)
-                       cpumask_set_cpu(cpu, cm);
-       if (cpumask_weight(cm) == 0) {
-               cpumask_setall(cm);
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
-                       cpumask_clear_cpu(cpu, cm);
-               WARN_ON_ONCE(cpumask_weight(cm) == 0);
        }
-       set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-       rcu_boost_kthread_setaffinity(rnp, cm);
-       free_cpumask_var(cm);
+       invoke_rcu_callbacks_kthread();
 }
 
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held.  So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-                                               struct rcu_node *rnp)
+static void invoke_rcu_core(void)
 {
-       unsigned long flags;
-       int rnp_index = rnp - &rsp->node[0];
-       struct sched_param sp;
-       struct task_struct *t;
-
-       if (!rcu_kthreads_spawnable ||
-           rnp->qsmaskinit == 0)
-               return 0;
-       if (rnp->node_kthread_task == NULL) {
-               t = kthread_create(rcu_node_kthread, (void *)rnp,
-                                  "rcun%d", rnp_index);
-               if (IS_ERR(t))
-                       return PTR_ERR(t);
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               rnp->node_kthread_task = t;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               sp.sched_priority = 99;
-               sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-       }
-       return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+       raise_softirq(RCU_SOFTIRQ);
 }
 
-static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
-
-/*
- * Spawn all kthreads -- called as soon as the scheduler is running.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-       int cpu;
-       struct rcu_node *rnp;
-       struct task_struct *t;
-
-       rcu_kthreads_spawnable = 1;
-       for_each_possible_cpu(cpu) {
-               per_cpu(rcu_cpu_has_work, cpu) = 0;
-               if (cpu_online(cpu)) {
-                       (void)rcu_spawn_one_cpu_kthread(cpu);
-                       t = per_cpu(rcu_cpu_kthread_task, cpu);
-                       if (t)
-                               wake_up_process(t);
-               }
-       }
-       rnp = rcu_get_root(rcu_state);
-       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-       if (rnp->node_kthread_task)
-               wake_up_process(rnp->node_kthread_task);
-       if (NUM_RCU_NODES > 1) {
-               rcu_for_each_leaf_node(rcu_state, rnp) {
-                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-                       t = rnp->node_kthread_task;
-                       if (t)
-                               wake_up_process(t);
-                       rcu_wake_one_boost_kthread(rnp);
-               }
-       }
-       return 0;
-}
-early_initcall(rcu_spawn_kthreads);
-
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
           struct rcu_state *rsp)
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
        rcu_preempt_init_percpu_data(cpu);
 }
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-       struct rcu_node *rnp = rdp->mynode;
-
-       /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-       if (rcu_kthreads_spawnable) {
-               (void)rcu_spawn_one_cpu_kthread(cpu);
-               if (rnp->node_kthread_task == NULL)
-                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-       }
-}
-
-/*
- * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
- * but the RCU threads are woken on demand, and if demand is low this
- * could be a while triggering the hung task watchdog.
- *
- * In order to avoid this, poke all tasks once the CPU is fully
- * up and running.
- */
-static void __cpuinit rcu_online_kthreads(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-       struct rcu_node *rnp = rdp->mynode;
-       struct task_struct *t;
-
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t)
-               wake_up_process(t);
-
-       t = rnp->node_kthread_task;
-       if (t)
-               wake_up_process(t);
-
-       rcu_wake_one_boost_kthread(rnp);
-}
-
 /*
  * Handle CPU online/offline notification events.
  */
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                rcu_prepare_kthreads(cpu);
                break;
        case CPU_ONLINE:
-               rcu_online_kthreads(cpu);
        case CPU_DOWN_FAILED:
                rcu_node_kthread_setaffinity(rnp, -1);
                rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
        __rcu_init_preempt();
+        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
        /*
         * We don't need protection against CPU-hotplug here because
index 7b9a08b4aaea01d48eacb8781296a7be205fb898..01b2ccda26fbf82880cc1d4a50bf9f2877e91bba 100644 (file)
@@ -369,6 +369,7 @@ struct rcu_state {
                                                /*  period because */
                                                /*  force_quiescent_state() */
                                                /*  was running. */
+       u8      boost;                          /* Subject to priority boost. */
        unsigned long gpnum;                    /* Current gp number. */
        unsigned long completed;                /* # of last completed gp. */
 
@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
                                      unsigned long flags);
+static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+static void invoke_rcu_callbacks_kthread(void);
+#ifdef CONFIG_RCU_BOOST
+static void rcu_preempt_do_callbacks(void);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                                          cpumask_var_t cm);
-static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                 struct rcu_node *rnp,
                                                 int rnp_index);
+static void invoke_rcu_node_kthread(struct rcu_node *rnp);
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
+static void __cpuinit rcu_prepare_kthreads(int cpu);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
index c8bff3099a89eeeccf9e3d330a16f6c4d9a2c340..14dc7dd0090220f717f83f666fb2a30a32f66143 100644 (file)
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
                                &__get_cpu_var(rcu_preempt_data));
 }
 
+#ifdef CONFIG_RCU_BOOST
+
+static void rcu_preempt_do_callbacks(void)
+{
+       rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+}
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Queue a preemptible-RCU callback for invocation after a grace period.
  */
@@ -1248,6 +1257,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        }
 }
 
+/*
+ * Wake up the per-CPU kthread to invoke RCU callbacks.
+ */
+static void invoke_rcu_callbacks_kthread(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __this_cpu_write(rcu_cpu_has_work, 1);
+       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
+               local_irq_restore(flags);
+               return;
+       }
+       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+       local_irq_restore(flags);
+}
+
 /*
  * Set the affinity of the boost kthread.  The CPU-hotplug locks are
  * held, so no one should be messing with the existence of the boost
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
        if (&rcu_preempt_state != rsp)
                return 0;
+       rsp->boost = 1;
        if (rnp->boost_kthread_task != NULL)
                return 0;
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+       wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        return 0;
 }
 
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Stop RCU's per-CPU kthread when its CPU goes offline.
+ */
+static void rcu_stop_cpu_kthread(int cpu)
 {
-       if (rnp->boost_kthread_task)
-               wake_up_process(rnp->boost_kthread_task);
+       struct task_struct *t;
+
+       /* Stop the CPU's kthread. */
+       t = per_cpu(rcu_cpu_kthread_task, cpu);
+       if (t != NULL) {
+               per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
+               kthread_stop(t);
+       }
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_kthread_do_work(void)
+{
+       rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
+       rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+       rcu_preempt_do_callbacks();
+}
+
+/*
+ * Wake up the specified per-rcu_node-structure kthread.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
+ */
+static void invoke_rcu_node_kthread(struct rcu_node *rnp)
+{
+       struct task_struct *t;
+
+       t = rnp->node_kthread_task;
+       if (t != NULL)
+               wake_up_process(t);
+}
+
+/*
+ * Set the specified CPU's kthread to run RT or not, as specified by
+ * the to_rt argument.  The CPU-hotplug locks are held, so the task
+ * is not going away.
+ */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+       int policy;
+       struct sched_param sp;
+       struct task_struct *t;
+
+       t = per_cpu(rcu_cpu_kthread_task, cpu);
+       if (t == NULL)
+               return;
+       if (to_rt) {
+               policy = SCHED_FIFO;
+               sp.sched_priority = RCU_KTHREAD_PRIO;
+       } else {
+               policy = SCHED_NORMAL;
+               sp.sched_priority = 0;
+       }
+       sched_setscheduler_nocheck(t, policy, &sp);
+}
+
+/*
+ * Timer handler to initiate the waking up of per-CPU kthreads that
+ * have yielded the CPU due to excess numbers of RCU callbacks.
+ * We wake up the per-rcu_node kthread, which in turn will wake up
+ * the booster kthread.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
+       struct rcu_node *rnp = rdp->mynode;
+
+       atomic_or(rdp->grpmask, &rnp->wakemask);
+       invoke_rcu_node_kthread(rnp);
+}
+
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted.  Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+{
+       struct sched_param sp;
+       struct timer_list yield_timer;
+
+       setup_timer_on_stack(&yield_timer, f, arg);
+       mod_timer(&yield_timer, jiffies + 2);
+       sp.sched_priority = 0;
+       sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+       set_user_nice(current, 19);
+       schedule();
+       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+       del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline.  We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh.  This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+       while (cpu_is_offline(cpu) ||
+              !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+              smp_processor_id() != cpu) {
+               if (kthread_should_stop())
+                       return 1;
+               per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+               per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
+               local_bh_enable();
+               schedule_timeout_uninterruptible(1);
+               if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+                       set_cpus_allowed_ptr(current, cpumask_of(cpu));
+               local_bh_disable();
+       }
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+       return 0;
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
+ * earlier RCU softirq.
+ */
+static int rcu_cpu_kthread(void *arg)
+{
+       int cpu = (int)(long)arg;
+       unsigned long flags;
+       int spincnt = 0;
+       unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
+       char work;
+       char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+       for (;;) {
+               *statusp = RCU_KTHREAD_WAITING;
+               rcu_wait(*workp != 0 || kthread_should_stop());
+               local_bh_disable();
+               if (rcu_cpu_kthread_should_stop(cpu)) {
+                       local_bh_enable();
+                       break;
+               }
+               *statusp = RCU_KTHREAD_RUNNING;
+               per_cpu(rcu_cpu_kthread_loops, cpu)++;
+               local_irq_save(flags);
+               work = *workp;
+               *workp = 0;
+               local_irq_restore(flags);
+               if (work)
+                       rcu_kthread_do_work();
+               local_bh_enable();
+               if (*workp != 0)
+                       spincnt++;
+               else
+                       spincnt = 0;
+               if (spincnt > 10) {
+                       *statusp = RCU_KTHREAD_YIELDING;
+                       rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                       spincnt = 0;
+               }
+       }
+       *statusp = RCU_KTHREAD_STOPPED;
+       return 0;
+}
+
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task.  There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ *
+ * Please note that we cannot simply refuse to wake up the per-CPU
+ * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
+ * which can result in softlockup complaints if the task ends up being
+ * idle for more than a couple of minutes.
+ *
+ * However, please note also that we cannot bind the per-CPU kthread to its
+ * CPU until that CPU is fully online.  We also cannot wait until the
+ * CPU is fully online before we create its per-CPU kthread, as this would
+ * deadlock the system when CPU notifiers tried waiting for grace
+ * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
+ * is online.  If its CPU is not yet fully online, then the code in
+ * rcu_cpu_kthread() will wait until it is fully online, and then do
+ * the binding.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+       struct sched_param sp;
+       struct task_struct *t;
+
+       if (!rcu_kthreads_spawnable ||
+           per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+               return 0;
+       t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+       if (cpu_online(cpu))
+               kthread_bind(t, cpu);
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+       WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+       per_cpu(rcu_cpu_kthread_task, cpu) = t;
+       wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
+       return 0;
+}
+
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed.  We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
+{
+       int cpu;
+       unsigned long flags;
+       unsigned long mask;
+       struct rcu_node *rnp = (struct rcu_node *)arg;
+       struct sched_param sp;
+       struct task_struct *t;
+
+       for (;;) {
+               rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+               rcu_wait(atomic_read(&rnp->wakemask) != 0);
+               rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               mask = atomic_xchg(&rnp->wakemask, 0);
+               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+                       if ((mask & 0x1) == 0)
+                               continue;
+                       preempt_disable();
+                       t = per_cpu(rcu_cpu_kthread_task, cpu);
+                       if (!cpu_online(cpu) || t == NULL) {
+                               preempt_enable();
+                               continue;
+                       }
+                       per_cpu(rcu_cpu_has_work, cpu) = 1;
+                       sp.sched_priority = RCU_KTHREAD_PRIO;
+                       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+                       preempt_enable();
+               }
+       }
+       /* NOTREACHED */
+       rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
+       return 0;
+}
+
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question.  The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set, use -1 if there is
+ * no outgoing CPU.  If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+       cpumask_var_t cm;
+       int cpu;
+       unsigned long mask = rnp->qsmaskinit;
+
+       if (rnp->node_kthread_task == NULL)
+               return;
+       if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+               return;
+       cpumask_clear(cm);
+       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+               if ((mask & 0x1) && cpu != outgoingcpu)
+                       cpumask_set_cpu(cpu, cm);
+       if (cpumask_weight(cm) == 0) {
+               cpumask_setall(cm);
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
+                       cpumask_clear_cpu(cpu, cm);
+               WARN_ON_ONCE(cpumask_weight(cm) == 0);
+       }
+       set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+       rcu_boost_kthread_setaffinity(rnp, cm);
+       free_cpumask_var(cm);
+}
+
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ * Called during boot before online/offline can happen, or, if
+ * during runtime, with the main CPU-hotplug locks held.  So only
+ * one of these can be executing at a time.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+                                               struct rcu_node *rnp)
+{
+       unsigned long flags;
+       int rnp_index = rnp - &rsp->node[0];
+       struct sched_param sp;
+       struct task_struct *t;
+
+       if (!rcu_kthreads_spawnable ||
+           rnp->qsmaskinit == 0)
+               return 0;
+       if (rnp->node_kthread_task == NULL) {
+               t = kthread_create(rcu_node_kthread, (void *)rnp,
+                                  "rcun%d", rnp_index);
+               if (IS_ERR(t))
+                       return PTR_ERR(t);
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               rnp->node_kthread_task = t;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               sp.sched_priority = 99;
+               sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+               wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+       }
+       return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+}
+
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+       int cpu;
+       struct rcu_node *rnp;
+
+       rcu_kthreads_spawnable = 1;
+       for_each_possible_cpu(cpu) {
+               per_cpu(rcu_cpu_has_work, cpu) = 0;
+               if (cpu_online(cpu))
+                       (void)rcu_spawn_one_cpu_kthread(cpu);
+       }
+       rnp = rcu_get_root(rcu_state);
+       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       if (NUM_RCU_NODES > 1) {
+               rcu_for_each_leaf_node(rcu_state, rnp)
+                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       }
+       return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+       struct rcu_node *rnp = rdp->mynode;
+
+       /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+       if (rcu_kthreads_spawnable) {
+               (void)rcu_spawn_one_cpu_kthread(cpu);
+               if (rnp->node_kthread_task == NULL)
+                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       }
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-                                         cpumask_var_t cm)
+static void invoke_rcu_callbacks_kthread(void)
 {
+       WARN_ON_ONCE(1);
 }
 
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-                                                struct rcu_node *rnp,
-                                                int rnp_index)
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void rcu_stop_cpu_kthread(int cpu)
+{
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+}
+
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
-       return 0;
 }
 
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
 
@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
- * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked
+ * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
  * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu)
 
        /* If RCU callbacks are still pending, RCU still needs this CPU. */
        if (c)
-               invoke_rcu_cpu_kthread();
+               invoke_rcu_core();
        return c;
 }
 
index 9678cc3650f5e9c8f4e3eb409d26e9c4e784e7d5..4e144876dc68208b89931518c2d64d1433def061 100644 (file)
@@ -46,6 +46,8 @@
 #define RCU_TREE_NONCORE
 #include "rcutree.h"
 
+#ifdef CONFIG_RCU_BOOST
+
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
        return "SRWOY"[kthread_status];
 }
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
        if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
        seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
-       seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld",
+       seq_printf(m, " ql=%ld qs=%c%c%c%c",
                   rdp->qlen,
                   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
                        rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                        rdp->nxttail[RCU_NEXT_READY_TAIL]],
                   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
                        rdp->nxttail[RCU_WAIT_TAIL]],
-                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+       seq_printf(m, " kt=%d/%c/%d ktl=%x",
                   per_cpu(rcu_cpu_has_work, rdp->cpu),
                   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
                                          rdp->cpu)),
                   per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
-                  per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff,
-                  rdp->blimit);
+                  per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+       seq_printf(m, " b=%ld", rdp->blimit);
        seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
                   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
                   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
        seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
-       seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen,
+       seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
                   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
                        rdp->nxttail[RCU_NEXT_TAIL]],
                   ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
                        rdp->nxttail[RCU_NEXT_READY_TAIL]],
                   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
                        rdp->nxttail[RCU_WAIT_TAIL]],
-                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+       seq_printf(m, ",%d,\"%c\"",
                   per_cpu(rcu_cpu_has_work, rdp->cpu),
                   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
-                                         rdp->cpu)),
-                  rdp->blimit);
+                                         rdp->cpu)));
+#endif /* #ifdef CONFIG_RCU_BOOST */
+       seq_printf(m, ",%ld", rdp->blimit);
        seq_printf(m, ",%lu,%lu,%lu\n",
                   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 #ifdef CONFIG_NO_HZ
        seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
-       seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
+       seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
+#ifdef CONFIG_RCU_BOOST
+       seq_puts(m, "\"kt\",\"ktl\"");
+#endif /* #ifdef CONFIG_RCU_BOOST */
+       seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
 #ifdef CONFIG_TREE_PREEMPT_RCU
        seq_puts(m, "\"rcu_preempt:\"\n");
        PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
index 13960170cad4d91f0ec00dba8e0132d64e7c68fd..40cf63ddd4b3d740d2620ddbf1fa245830b1d703 100644 (file)
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
-       "TASKLET", "SCHED", "HRTIMER"
+       "TASKLET", "SCHED", "HRTIMER", "RCU"
 };
 
 /*
index 1c95fd677328e4fe3d0e582fc62284d9ec150a68..e0980f0d9a0ad2d559b98c12f26317b55044cad1 100644 (file)
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 static DEFINE_SPINLOCK(watchdog_lock);
-static cycle_t watchdog_last;
 static int watchdog_running;
 
 static int clocksource_watchdog_kthread(void *data);
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
        if (!watchdog_running)
                goto out;
 
-       wdnow = watchdog->read(watchdog);
-       wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
-                                    watchdog->mult, watchdog->shift);
-       watchdog_last = wdnow;
-
        list_for_each_entry(cs, &watchdog_list, wd_list) {
 
                /* Clocksource already marked unstable? */
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
                        continue;
                }
 
+               local_irq_disable();
                csnow = cs->read(cs);
+               wdnow = watchdog->read(watchdog);
+               local_irq_enable();
 
                /* Clocksource initialized ? */
                if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
                        cs->flags |= CLOCK_SOURCE_WATCHDOG;
-                       cs->wd_last = csnow;
+                       cs->wd_last = wdnow;
+                       cs->cs_last = csnow;
                        continue;
                }
 
-               /* Check the deviation from the watchdog clocksource. */
-               cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
+               wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
+                                            watchdog->mult, watchdog->shift);
+
+               cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
                                             cs->mask, cs->mult, cs->shift);
-               cs->wd_last = csnow;
+               cs->cs_last = csnow;
+               cs->wd_last = wdnow;
+
+               /* Check the deviation from the watchdog clocksource. */
                if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
                        clocksource_unstable(cs, cs_nsec - wd_nsec);
                        continue;
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
                return;
        init_timer(&watchdog_timer);
        watchdog_timer.function = clocksource_watchdog;
-       watchdog_last = watchdog->read(watchdog);
        watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
        add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
        watchdog_running = 1;
index dff763b7baf172d9d7b90c8f0dddc0427a83a1c4..1f06468a10d7a65ad8c0bffa8e13c0eaf44a8ad4 100644 (file)
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos)
        const char **fmt = v;
        int start_index;
 
-       if (!fmt)
-               fmt = __start___trace_bprintk_fmt + *pos;
-
        start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
 
        if (*pos < start_index)
-               return fmt;
+               return __start___trace_bprintk_fmt + *pos;
 
        return find_next_mod_format(start_index, v, fmt, pos);
 }
index 0eb463ea88dd71326b70342f8492df873a6b5df1..27dfd3b82b0f39cfcd38ff8b2a02c55151651f30 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -112,9 +112,9 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
        kmem_cache_free(anon_vma_cachep, anon_vma);
 }
 
-static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
+static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
 {
-       return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
+       return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
 }
 
 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
@@ -159,7 +159,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated;
 
-               avc = anon_vma_chain_alloc();
+               avc = anon_vma_chain_alloc(GFP_KERNEL);
                if (!avc)
                        goto out_enomem;
 
@@ -200,6 +200,32 @@ int anon_vma_prepare(struct vm_area_struct *vma)
        return -ENOMEM;
 }
 
+/*
+ * This is a useful helper function for locking the anon_vma root as
+ * we traverse the vma->anon_vma_chain, looping over anon_vma's that
+ * have the same vma.
+ *
+ * Such anon_vma's should have the same root, so you'd expect to see
+ * just a single mutex_lock for the whole traversal.
+ */
+static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
+{
+       struct anon_vma *new_root = anon_vma->root;
+       if (new_root != root) {
+               if (WARN_ON_ONCE(root))
+                       mutex_unlock(&root->mutex);
+               root = new_root;
+               mutex_lock(&root->mutex);
+       }
+       return root;
+}
+
+static inline void unlock_anon_vma_root(struct anon_vma *root)
+{
+       if (root)
+               mutex_unlock(&root->mutex);
+}
+
 static void anon_vma_chain_link(struct vm_area_struct *vma,
                                struct anon_vma_chain *avc,
                                struct anon_vma *anon_vma)
@@ -208,13 +234,11 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
        avc->anon_vma = anon_vma;
        list_add(&avc->same_vma, &vma->anon_vma_chain);
 
-       anon_vma_lock(anon_vma);
        /*
         * It's critical to add new vmas to the tail of the anon_vma,
         * see comment in huge_memory.c:__split_huge_page().
         */
        list_add_tail(&avc->same_anon_vma, &anon_vma->head);
-       anon_vma_unlock(anon_vma);
 }
 
 /*
@@ -224,13 +248,24 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
        struct anon_vma_chain *avc, *pavc;
+       struct anon_vma *root = NULL;
 
        list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
-               avc = anon_vma_chain_alloc();
-               if (!avc)
-                       goto enomem_failure;
-               anon_vma_chain_link(dst, avc, pavc->anon_vma);
+               struct anon_vma *anon_vma;
+
+               avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
+               if (unlikely(!avc)) {
+                       unlock_anon_vma_root(root);
+                       root = NULL;
+                       avc = anon_vma_chain_alloc(GFP_KERNEL);
+                       if (!avc)
+                               goto enomem_failure;
+               }
+               anon_vma = pavc->anon_vma;
+               root = lock_anon_vma_root(root, anon_vma);
+               anon_vma_chain_link(dst, avc, anon_vma);
        }
+       unlock_anon_vma_root(root);
        return 0;
 
  enomem_failure:
@@ -263,7 +298,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        anon_vma = anon_vma_alloc();
        if (!anon_vma)
                goto out_error;
-       avc = anon_vma_chain_alloc();
+       avc = anon_vma_chain_alloc(GFP_KERNEL);
        if (!avc)
                goto out_error_free_anon_vma;
 
@@ -280,7 +315,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        get_anon_vma(anon_vma->root);
        /* Mark this anon_vma as the one where our new (COWed) pages go. */
        vma->anon_vma = anon_vma;
+       anon_vma_lock(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
+       anon_vma_unlock(anon_vma);
 
        return 0;
 
@@ -291,36 +328,43 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        return -ENOMEM;
 }
 
-static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
-{
-       struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
-       int empty;
-
-       /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
-       if (!anon_vma)
-               return;
-
-       anon_vma_lock(anon_vma);
-       list_del(&anon_vma_chain->same_anon_vma);
-
-       /* We must garbage collect the anon_vma if it's empty */
-       empty = list_empty(&anon_vma->head);
-       anon_vma_unlock(anon_vma);
-
-       if (empty)
-               put_anon_vma(anon_vma);
-}
-
 void unlink_anon_vmas(struct vm_area_struct *vma)
 {
        struct anon_vma_chain *avc, *next;
+       struct anon_vma *root = NULL;
 
        /*
         * Unlink each anon_vma chained to the VMA.  This list is ordered
         * from newest to oldest, ensuring the root anon_vma gets freed last.
         */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
-               anon_vma_unlink(avc);
+               struct anon_vma *anon_vma = avc->anon_vma;
+
+               root = lock_anon_vma_root(root, anon_vma);
+               list_del(&avc->same_anon_vma);
+
+               /*
+                * Leave empty anon_vmas on the list - we'll need
+                * to free them outside the lock.
+                */
+               if (list_empty(&anon_vma->head))
+                       continue;
+
+               list_del(&avc->same_vma);
+               anon_vma_chain_free(avc);
+       }
+       unlock_anon_vma_root(root);
+
+       /*
+        * Iterate the list once more, it now only contains empty and unlinked
+        * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
+        * needing to acquire the anon_vma->root->mutex.
+        */
+       list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+               struct anon_vma *anon_vma = avc->anon_vma;
+
+               put_anon_vma(anon_vma);
+
                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
index d31862e0aa1c00f415d447e12e780482e2bdbca5..8e319a416eec261c2b8d15ab0d9ee7d70afa2aa5 100644 (file)
@@ -71,9 +71,8 @@ EXPORT_SYMBOL(complete_request_key);
  * This is called in context of freshly forked kthread before kernel_execve(),
  * so we can simply install the desired session_keyring at this point.
  */
-static int umh_keys_init(struct subprocess_info *info)
+static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
 {
-       struct cred *cred = (struct cred*)current_cred();
        struct key *keyring = info->data;
 
        return install_session_keyring_to_cred(cred, keyring);
index 86ee16ca365e6fc612a4ad25baf6ecbf9fbc6893..440030818db70c88c582d0047790baed074297c9 100644 (file)
@@ -209,6 +209,7 @@ static void isight_packet(struct fw_iso_context *context, u32 cycle,
                isight->packet_index = -1;
                return;
        }
+       fw_iso_context_queue_flush(isight->context);
 
        if (++index >= QUEUE_LENGTH)
                index = 0;
index 5e619a84da061295fa4758e313935e89aea8009d..15f0161ce4a2342f9eb9b5eca86392a9481eacc2 100644 (file)
@@ -1440,6 +1440,14 @@ static struct snd_emu_chip_details emu_chip_details[] = {
         .ca0102_chip = 1,
         .spk71 = 1,
         .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */
+       /* EMU0404 PCIe */
+       {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40051102,
+        .driver = "Audigy2", .name = "E-mu 0404 PCIe [MAEM8984]",
+        .id = "EMU0404",
+        .emu10k2_chip = 1,
+        .ca0108_chip = 1,
+        .spk71 = 1,
+        .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 PCIe ver_03 */
        /* Note that all E-mu cards require kernel 2.6 or newer. */
        {.vendor = 0x1102, .device = 0x0008,
         .driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]",
index f1de1bac042c260e5612a2f55f9ff13e79a30406..55f0647458c70aebb79028d3131e6f520d63eb19 100644 (file)
@@ -50,7 +50,12 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable);
 int snd_hda_attach_beep_device(struct hda_codec *codec, int nid);
 void snd_hda_detach_beep_device(struct hda_codec *codec);
 #else
-#define snd_hda_attach_beep_device(...)                0
-#define snd_hda_detach_beep_device(...)
+static inline int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
+{
+       return 0;
+}
+static inline void snd_hda_detach_beep_device(struct hda_codec *codec)
+{
+}
 #endif
 #endif
index 43fcfbd32847019931bc774bf00ecd20b2887a9b..61a774b3d3cb5e5ad14846815cb58304335835ba 100644 (file)
@@ -13316,9 +13316,8 @@ static void alc268_acer_lc_setup(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
        spec->autocfg.hp_pins[0] = 0x15;
        spec->autocfg.speaker_pins[0] = 0x14;
-       spec->automute_mixer_nid[0] = 0x0f;
        spec->automute = 1;
-       spec->automute_mode = ALC_AUTOMUTE_MIXER;
+       spec->automute_mode = ALC_AUTOMUTE_AMP;
        spec->ext_mic.pin = 0x18;
        spec->ext_mic.mux_idx = 0;
        spec->int_mic.pin = 0x12;
index 605c99e1e520de5d205a76046764d7e5f0f5a852..c952582fb21810933fc9891b687f516d84cf881e 100644 (file)
@@ -832,10 +832,13 @@ static int via_hp_build(struct hda_codec *codec)
        knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
        knew->private_value = nid;
 
-       knew = via_clone_control(spec, &via_hp_mixer[1]);
-       if (knew == NULL)
-               return -ENOMEM;
-       knew->subdevice = side_mute_channel(spec);
+       nid = side_mute_channel(spec);
+       if (nid) {
+               knew = via_clone_control(spec, &via_hp_mixer[1]);
+               if (knew == NULL)
+                       return -ENOMEM;
+               knew->subdevice = nid;
+       }
 
        return 0;
 }
index 34b24286d279d542e3d24e80a3531e8cf95bc8f5..2692e5ae5f2daa53822242c864a4f1b5ddf6a8ff 100644 (file)
@@ -445,7 +445,7 @@ static void lola_reset_setups(struct lola *chip)
        lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */
 }
 
-static int lola_parse_tree(struct lola *chip)
+static int __devinit lola_parse_tree(struct lola *chip)
 {
        unsigned int val;
        int nid, err;
index 949691a876d3635798c6000da1b651d13684b1a6..3f08afc0f0d382b5b509c924abc2eb8532f4c822 100644 (file)
@@ -521,6 +521,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 #define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024)
 
 /* revisions >= 230 indicate AES32 card */
+#define HDSPM_MADI_OLD_REV     207
 #define HDSPM_MADI_REV         210
 #define HDSPM_RAYDAT_REV       211
 #define HDSPM_AIO_REV          212
@@ -1143,7 +1144,7 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
 
                /* if wordclock has synced freq and wordclock is valid */
                if ((status2 & HDSPM_wcLock) != 0 &&
-                               (status & HDSPM_SelSyncRef0) == 0) {
+                               (status2 & HDSPM_SelSyncRef0) == 0) {
 
                        rate_bits = status2 & HDSPM_wcFreqMask;
 
@@ -1639,12 +1640,14 @@ static int snd_hdspm_midi_input_read (struct hdspm_midi *hmidi)
                }
        }
        hmidi->pending = 0;
+       spin_unlock_irqrestore(&hmidi->lock, flags);
 
+       spin_lock_irqsave(&hmidi->hdspm->lock, flags);
        hmidi->hdspm->control_register |= hmidi->ie;
        hdspm_write(hmidi->hdspm, HDSPM_controlRegister,
                    hmidi->hdspm->control_register);
+       spin_unlock_irqrestore(&hmidi->hdspm->lock, flags);
 
-       spin_unlock_irqrestore (&hmidi->lock, flags);
        return snd_hdspm_midi_output_write (hmidi);
 }
 
@@ -6377,6 +6380,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
 
        switch (hdspm->firmware_rev) {
        case HDSPM_MADI_REV:
+       case HDSPM_MADI_OLD_REV:
                hdspm->io_type = MADI;
                hdspm->card_name = "RME MADI";
                hdspm->midiPorts = 3;
index a91719d5918b69c6e76e577364c59f892f5768bd..1e3ae3327dd3a65431b4a517ab5b340dac2ee6f7 100644 (file)
@@ -270,7 +270,6 @@ static int usb6fire_fw_ezusb_upload(
        data = 0x00; /* resume ezusb cpu */
        ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
        if (ret < 0) {
-               release_firmware(fw);
                snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
                                "firmware %s: end message.\n", fwname);
                return ret;
index b137b25865cc986cc8f6bc8d21ca482e34b987cd..d144cdb2f15909acefec2f0c45ea0cd1ff523030 100644 (file)
@@ -395,12 +395,12 @@ static int usb6fire_pcm_open(struct snd_pcm_substream *alsa_sub)
        alsa_rt->hw = pcm_hw;
 
        if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               if (rt->rate >= 0)
+               if (rt->rate < ARRAY_SIZE(rates))
                        alsa_rt->hw.rates = rates_alsaid[rt->rate];
                alsa_rt->hw.channels_max = OUT_N_CHANNELS;
                sub = &rt->playback;
        } else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE) {
-               if (rt->rate >= 0)
+               if (rt->rate < ARRAY_SIZE(rates))
                        alsa_rt->hw.rates = rates_alsaid[rt->rate];
                alsa_rt->hw.channels_max = IN_N_CHANNELS;
                sub = &rt->capture;
index 032ba6398a5c696cc46b0957d9452b68aad2d660..940257b5774ec209d09e3b2d4fbee647ff86dd71 100644 (file)
@@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
 
-LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
+LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
 
 ALL_CFLAGS += $(BASIC_CFLAGS)
 ALL_CFLAGS += $(ARCH_CFLAGS)
index 1e88485c16a04b755e68bb14510c3fd5cf368769..0a7ed5b5e281c88b321de87ced66a3d29ebb003d 100644 (file)
@@ -2187,6 +2187,7 @@ static const struct flag flags[] = {
        { "TASKLET_SOFTIRQ", 6 },
        { "SCHED_SOFTIRQ", 7 },
        { "HRTIMER_SOFTIRQ", 8 },
+       { "RCU_SOFTIRQ", 9 },
 
        { "HRTIMER_NORESTART", 0 },
        { "HRTIMER_RESTART", 1 },