Merge remote branch 'tip/perf/core' into oprofile/core
author Robert Richter <robert.richter@amd.com>
Fri, 15 Oct 2010 10:45:00 +0000 (12:45 +0200)
committer Robert Richter <robert.richter@amd.com>
Fri, 15 Oct 2010 10:45:00 +0000 (12:45 +0200)
Conflicts:
arch/arm/oprofile/common.c
kernel/perf_event.c

463 files changed:
CREDITS
Documentation/kprobes.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/kernel/entry.S
arch/alpha/kernel/perf_event.c
arch/alpha/kernel/process.c
arch/alpha/kernel/signal.c
arch/alpha/kernel/systbls.S
arch/arm/Kconfig
arch/arm/boot/compressed/Makefile
arch/arm/common/it8152.c
arch/arm/include/asm/pgtable.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/perf_event.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-davinci/dm355.c
arch/arm/mach-davinci/dm365.c
arch/arm/mach-davinci/dm644x.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-dove/include/mach/io.h
arch/arm/mach-ixp4xx/common-pci.c
arch/arm/mach-ixp4xx/include/mach/hardware.h
arch/arm/mach-kirkwood/include/mach/kirkwood.h
arch/arm/mach-kirkwood/pcie.c
arch/arm/mach-mmp/include/mach/system.h
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/include/mach/hardware.h
arch/arm/mach-pxa/include/mach/io.h
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/vpac270.c
arch/arm/mach-u300/include/mach/gpio.h
arch/arm/mach-vexpress/ct-ca9x4.c
arch/arm/mm/alignment.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
arch/arm/plat-nomadik/timer.c
arch/arm/plat-omap/Kconfig
arch/arm/plat-omap/mcbsp.c
arch/arm/plat-omap/sram.c
arch/avr32/kernel/module.c
arch/h8300/kernel/module.c
arch/m32r/include/asm/signal.h
arch/m32r/include/asm/unistd.h
arch/m32r/kernel/entry.S
arch/m32r/kernel/ptrace.c
arch/m32r/kernel/signal.c
arch/m68k/mac/macboing.c
arch/mips/Kconfig
arch/mips/alchemy/common/prom.c
arch/mips/boot/compressed/Makefile
arch/mips/cavium-octeon/Kconfig
arch/mips/cavium-octeon/cpu.c
arch/mips/cavium-octeon/executive/Makefile
arch/mips/include/asm/atomic.h
arch/mips/include/asm/cop2.h
arch/mips/include/asm/gic.h
arch/mips/include/asm/mach-tx49xx/kmalloc.h
arch/mips/include/asm/mips-boards/maltaint.h
arch/mips/include/asm/page.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/unistd.h
arch/mips/kernel/irq-gic.c
arch/mips/kernel/kgdb.c
arch/mips/kernel/kspd.c
arch/mips/kernel/linux32.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/mm/dma-default.c
arch/mips/mm/sc-rm7k.c
arch/mips/mti-malta/malta-int.c
arch/mips/pci/pci-rc32434.c
arch/mips/pnx8550/common/reset.c
arch/mips/pnx8550/common/setup.c
arch/mn10300/Kconfig
arch/mn10300/Kconfig.debug
arch/mn10300/include/asm/bitops.h
arch/mn10300/include/asm/signal.h
arch/mn10300/kernel/module.c
arch/mn10300/kernel/signal.c
arch/mn10300/mm/Makefile
arch/mn10300/mm/cache-disabled.c [new file with mode: 0644]
arch/mn10300/mm/cache.c
arch/parisc/kernel/module.c
arch/powerpc/kernel/module.c
arch/powerpc/kernel/perf_callchain.c
arch/powerpc/kernel/perf_event.c
arch/powerpc/kernel/perf_event_fsl_emb.c
arch/powerpc/platforms/512x/clock.c
arch/powerpc/platforms/52xx/efika.c
arch/powerpc/platforms/52xx/mpc52xx_common.c
arch/s390/kernel/module.c
arch/sh/kernel/module.c
arch/sh/kernel/perf_callchain.c
arch/sh/kernel/perf_event.c
arch/sparc/Kconfig
arch/sparc/include/asm/jump_label.h [new file with mode: 0644]
arch/sparc/kernel/Makefile
arch/sparc/kernel/jump_label.c [new file with mode: 0644]
arch/sparc/kernel/module.c
arch/sparc/kernel/perf_event.c
arch/tile/kernel/intvec_32.S
arch/um/drivers/net_kern.c
arch/um/kernel/exec.c
arch/um/kernel/internal.h
arch/um/kernel/syscall.c
arch/x86/Kconfig
arch/x86/boot/early_serial_console.c
arch/x86/include/asm/alternative.h
arch/x86/include/asm/amd_iommu_proto.h
arch/x86/include/asm/amd_iommu_types.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/jump_label.h [new file with mode: 0644]
arch/x86/include/asm/perf_event_p4.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/alternative.c
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/hpet.c
arch/x86/kernel/jump_label.c [new file with mode: 0644]
arch/x86/kernel/kprobes.c
arch/x86/kernel/module.c
arch/x86/kernel/setup.c
arch/x86/mm/fault.c
arch/x86/mm/kmemcheck/kmemcheck.c
arch/x86/xen/time.c
block/blk-merge.c
drivers/acpi/Kconfig
drivers/acpi/acpi_pad.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/rsutils.c
drivers/acpi/apei/Kconfig
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/einj.c
drivers/acpi/apei/erst-dbg.c
drivers/acpi/apei/erst.c
drivers/acpi/apei/ghes.c
drivers/acpi/apei/hest.c
drivers/acpi/atomicio.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/fan.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_perflib.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/acpi/video_detect.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_platform.c
drivers/ata/libahci.c
drivers/block/pktcdvd.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/cpuidle/governors/menu.c
drivers/dma/mv_xor.c
drivers/dma/shdma.c
drivers/edac/edac_mc.c
drivers/edac/i7core_edac.c
drivers/gpu/drm/drm_buffer.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i830/i830_dma.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/vga/vgaarb.c
drivers/hwmon/Kconfig
drivers/hwmon/coretemp.c
drivers/hwmon/f71882fg.c
drivers/hwmon/lis3lv02d.c
drivers/hwmon/pkgtemp.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-octeon.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/idle/intel_idle.c [changed mode: 0755->0644]
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/leds/leds-ns2.c
drivers/mfd/max8925-core.c
drivers/mfd/wm831x-irq.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/vmw_balloon.c [moved from drivers/misc/vmware_balloon.c with 100% similarity]
drivers/mmc/host/sdhci-s3c.c
drivers/mtd/nand/omap2.c
drivers/net/3c59x.c
drivers/net/atlx/atl1.c
drivers/net/e1000e/hw.h
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/netdev.c
drivers/net/ibm_newemac/core.c
drivers/net/netxen/netxen_nic_init.c
drivers/net/qlcnic/qlcnic_init.c
drivers/net/rionet.c
drivers/net/sgiseeq.c
drivers/net/smsc911x.c
drivers/net/tulip/de2104x.c
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/oprofile/oprofile_perf.c
drivers/pci/intel-iommu.c
drivers/pci/iov.c
drivers/pci/pci.h
drivers/pci/quirks.c
drivers/pcmcia/pcmcia_resource.c
drivers/pcmcia/pd6729.c
drivers/platform/x86/thinkpad_acpi.c
drivers/regulator/core.c
drivers/regulator/max8649.c
drivers/rtc/rtc-ab3100.c
drivers/rtc/rtc-s3c.c
drivers/s390/net/ctcm_main.c
drivers/serial/mfd.c
drivers/serial/mrst_max3110.c
drivers/spi/spi.c
drivers/spi/spi_gpio.c
drivers/spi/spi_mpc8xxx.c
drivers/staging/ti-st/st.h
drivers/staging/ti-st/st_core.c
drivers/staging/ti-st/st_core.h
drivers/staging/ti-st/st_kim.c
drivers/usb/core/Kconfig
drivers/usb/core/file.c
drivers/usb/core/message.c
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_gadget.h
drivers/usb/musb/musb_gadget_ep0.c
drivers/usb/musb/musb_host.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/video/console/fbcon.c
drivers/video/efifb.c
drivers/video/pxa168fb.c
drivers/video/sis/sis_main.c
drivers/xen/xenbus/xenbus_probe.c
fs/aio.c
fs/cifs/cifssmb.c
fs/cifs/inode.c
fs/compat.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/ocfs2/acl.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlmglue.h
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/ocfs2_ioctl.h
fs/ocfs2/refcounttree.c
fs/ocfs2/reservations.c
fs/ocfs2/suballoc.c
fs/ocfs2/symlink.c
fs/ocfs2/xattr.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/proc/vmcore.c
fs/reiserfs/ioctl.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
include/acpi/acpixf.h
include/asm-generic/hardirq.h
include/asm-generic/vmlinux.lds.h
include/drm/drmP.h
include/drm/drm_pciids.h
include/linux/cpuidle.h
include/linux/dma-mapping.h
include/linux/dmaengine.h
include/linux/dynamic_debug.h
include/linux/ftrace_event.h
include/linux/interrupt.h
include/linux/jump_label.h [new file with mode: 0644]
include/linux/module.h
include/linux/netlink.h
include/linux/pci_ids.h
include/linux/percpu.h
include/linux/perf_event.h
include/linux/rcupdate.h
include/linux/sched.h
include/linux/socket.h
include/linux/stop_machine.h
include/linux/tracepoint.h
include/linux/wait.h
include/net/addrconf.h
include/net/dst.h
include/net/route.h
include/net/xfrm.h
include/trace/events/irq.h
include/trace/events/napi.h
include/trace/events/net.h [new file with mode: 0644]
include/trace/events/power.h
include/trace/events/skb.h
ipc/sem.c
kernel/Makefile
kernel/exit.c
kernel/fork.c
kernel/hw_breakpoint.c
kernel/jump_label.c [new file with mode: 0644]
kernel/kfifo.c
kernel/kprobes.c
kernel/module.c
kernel/perf_event.c
kernel/sched.c
kernel/smp.c
kernel/test_kprobes.c
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_workqueue.c
kernel/tracepoint.c
kernel/watchdog.c
lib/Kconfig.debug
lib/bug.c
lib/dynamic_debug.c
lib/list_sort.c
mm/fremap.c
mm/hugetlb.c
mm/ksm.c
mm/mmap.c
mm/oom_kill.c
mm/percpu.c
mm/rmap.c
mm/vmscan.c
net/8021q/vlan_core.c
net/9p/trans_rdma.c
net/9p/trans_virtio.c
net/atm/br2684.c
net/core/datagram.c
net/core/dev.c
net/core/iovec.c
net/core/net-traces.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/Kconfig
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_timer.c
net/ipv4/xfrm4_policy.c
net/ipv4/xfrm4_state.c
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/xfrm6_state.c
net/mac80211/rx.c
net/netfilter/nf_conntrack_extend.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_tproxy_core.c
net/phonet/pep.c
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rose/af_rose.c
net/sunrpc/xprtsock.c
net/wireless/wext-priv.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
samples/kfifo/dma-example.c
scripts/Makefile
scripts/Makefile.build
scripts/Makefile.lib
scripts/basic/Makefile
scripts/basic/hash.c [deleted file]
scripts/gcc-goto.sh [new file with mode: 0644]
scripts/recordmcount.c [new file with mode: 0644]
scripts/recordmcount.h [new file with mode: 0644]
security/tomoyo/common.c
security/tomoyo/common.h
sound/core/control.c
sound/i2c/other/ak4xxx-adda.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_realtek.c
sound/pci/oxygen/oxygen.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/soc/sh/migor.c
sound/soc/soc-cache.c
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Makefile
tools/perf/builtin-annotate.c
tools/perf/builtin-report.c
tools/perf/feature-tests.mak
tools/perf/scripts/python/bin/netdev-times-record [new file with mode: 0644]
tools/perf/scripts/python/bin/netdev-times-report [new file with mode: 0644]
tools/perf/scripts/python/netdev-times.py [new file with mode: 0644]
tools/perf/util/cache.h
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/hist.c
tools/perf/util/path.c
tools/perf/util/sort.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/trace-event-scripting.c
tools/perf/util/ui/browser.c
tools/perf/util/ui/browser.h
tools/perf/util/ui/browsers/annotate.c
tools/perf/util/ui/browsers/hists.c
tools/perf/util/ui/browsers/map.c
tools/perf/util/ui/util.c
tools/perf/util/util.h
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 72b487869788c14cd40e9535b700f4be166aa12b..41d8e63d5165b5b786db6ab7d8c14fbc49fc0107 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3554,12 +3554,12 @@ E: cvance@nai.com
 D: portions of the Linux Security Module (LSM) framework and security modules
 
 N: Petr Vandrovec
-E: vandrove@vc.cvut.cz
+E: petr@vandrovec.name
 D: Small contributions to ncpfs
 D: Matrox framebuffer driver
-S: Chudenicka 8
-S: 10200 Prague 10, Hostivar
-S: Czech Republic
+S: 21513 Conradia Ct
+S: Cupertino, CA 95014
+S: USA
 
 N: Thibaut Varene
 E: T-Bone@parisc-linux.org
index 1762b81fcdf2ec4a235423687865453a16196aed..741fe66d6eca4852894cac79d6ad2d3162486c7a 100644 (file)
@@ -542,9 +542,11 @@ Kprobes does not use mutexes or allocate memory except during
 registration and unregistration.
 
 Probe handlers are run with preemption disabled.  Depending on the
-architecture, handlers may also run with interrupts disabled.  In any
-case, your handler should not yield the CPU (e.g., by attempting to
-acquire a semaphore).
+architecture and optimization state, handlers may also run with
+interrupts disabled (e.g., kretprobe handlers and optimized kprobe
+handlers run without interrupt disabled on x86/x86-64).  In any case,
+your handler should not yield the CPU (e.g., by attempting to acquire
+a semaphore).
 
 Since a return probe is implemented by replacing the return
 address with the trampoline's address, stack backtraces and calls
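For context, the constraint described in the updated paragraph (handlers run with preemption, and possibly interrupts, disabled, so they must not sleep or take a semaphore) is easiest to see in a minimal pre-handler. The sketch below is illustrative only and is not part of this merge; the probed symbol name is an arbitrary assumption.

/*
 * Illustrative sketch only (not part of this merge): a kprobe pre-handler
 * that obeys the rule above -- it runs with preemption (and possibly
 * interrupts) disabled, so it does only non-blocking work and never tries
 * to acquire a semaphore or otherwise yield the CPU.  The probed symbol
 * "do_fork" is an arbitrary example.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);	/* non-blocking work only */
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
};

static int __init kprobe_sketch_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kprobe_sketch_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_sketch_init);
module_exit(kprobe_sketch_exit);
MODULE_LICENSE("GPL");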
index 50b8148448fdfe47b2abe1c838636e4b5ed6391c..f46d8e66333f69afacb60b99484a66b96c30da00 100644 (file)
@@ -962,6 +962,13 @@ W: http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/mach-s3c6410/
 
+ARM/S5P ARM ARCHITECTURES
+M:     Kukjin Kim <kgene.kim@samsung.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-s5p*/
+
 ARM/SHMOBILE ARM ARCHITECTURE
 M:     Paul Mundt <lethal@linux-sh.org>
 M:     Magnus Damm <magnus.damm@gmail.com>
@@ -1220,7 +1227,7 @@ F:        drivers/auxdisplay/
 F:     include/linux/cfag12864b.h
 
 AVR32 ARCHITECTURE
-M:     Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 W:     http://www.atmel.com/products/AVR32/
 W:     http://avr32linux.org/
 W:     http://avrfreaks.net/
@@ -1228,7 +1235,7 @@ S:        Supported
 F:     arch/avr32/
 
 AVR32/AT32AP MACHINE SUPPORT
-M:     Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 S:     Supported
 F:     arch/avr32/mach-at32ap/
 
@@ -2199,6 +2206,12 @@ W:       http://acpi4asus.sf.net
 S:     Maintained
 F:     drivers/platform/x86/eeepc-laptop.c
 
+EFIFB FRAMEBUFFER DRIVER
+L:     linux-fbdev@vger.kernel.org
+M:     Peter Jones <pjones@redhat.com>
+S:     Maintained
+F:     drivers/video/efifb.c
+
 EFS FILESYSTEM
 W:     http://aeschi.ch.eu.org/efs/
 S:     Orphan
@@ -2662,6 +2675,8 @@ M:        Guenter Roeck <guenter.roeck@ericsson.com>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
 T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:     quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:     Maintained
 F:     Documentation/hwmon/
 F:     drivers/hwmon/
@@ -3773,9 +3788,8 @@ W:        http://www.syskonnect.com
 S:     Supported
 
 MATROX FRAMEBUFFER DRIVER
-M:     Petr Vandrovec <vandrove@vc.cvut.cz>
 L:     linux-fbdev@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
@@ -3899,10 +3913,8 @@ F:       Documentation/serial/moxa-smartio
 F:     drivers/char/mxser.*
 
 MSI LAPTOP SUPPORT
-M:     Lennart Poettering <mzxreary@0pointer.de>
+M:     Lee, Chun-Yi <jlee@novell.com>
 L:     platform-driver-x86@vger.kernel.org
-W:     https://tango.0pointer.de/mailman/listinfo/s270-linux
-W:     http://0pointer.de/lennart/tchibo.html
 S:     Maintained
 F:     drivers/platform/x86/msi-laptop.c
 
@@ -3919,8 +3931,10 @@ S:       Supported
 F:     drivers/mfd/
 
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
-S:     Orphan
+M:     Chris Ball <cjb@laptop.org>
 L:     linux-mmc@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:     Maintained
 F:     drivers/mmc/
 F:     include/linux/mmc/
 
@@ -3962,8 +3976,8 @@ S:        Maintained
 F:     drivers/net/natsemi.c
 
 NCP FILESYSTEM
-M:     Petr Vandrovec <vandrove@vc.cvut.cz>
-S:     Maintained
+M:     Petr Vandrovec <petr@vandrovec.name>
+S:     Odd Fixes
 F:     fs/ncpfs/
 
 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
@@ -5091,8 +5105,10 @@ S:       Maintained
 F:     drivers/mmc/host/sdricoh_cs.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-S:     Orphan
+M:     Chris Ball <cjb@laptop.org>
 L:     linux-mmc@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:     Maintained
 F:     drivers/mmc/host/sdhci.*
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
index 3133a5772eeb13e85f4eedf994ac662213b3e48c..c2362c2730064480aedbd9bdf8b5dfbe67223969 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
@@ -568,6 +568,12 @@ endif
 
 ifdef CONFIG_FUNCTION_TRACER
 KBUILD_CFLAGS  += -pg
+ifdef CONFIG_DYNAMIC_FTRACE
+       ifdef CONFIG_HAVE_C_RECORDMCOUNT
+               BUILD_C_RECORDMCOUNT := y
+               export BUILD_C_RECORDMCOUNT
+       endif
+endif
 endif
 
 # We trigger additional mismatches with less inlining
@@ -591,6 +597,11 @@ KBUILD_CFLAGS      += $(call cc-option,-fno-strict-overflow)
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+       KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
 # But warn user when we do so
 warn-assign = \
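The new check above runs scripts/gcc-goto.sh to see whether the compiler accepts GCC's 'asm goto' extension, which the jump-label support brought in by this merge depends on. Roughly, the script feeds the compiler a tiny test program and prints "y" only if it compiles; the sketch below illustrates the construct being probed and is not the script's exact test program.

/* try-asm-goto.c -- illustrative only.  'asm goto' lets an inline-assembly
 * statement branch to C labels; the four colons separate the (empty)
 * output, input and clobber lists from the label list.  If the compiler
 * accepts this, the Makefile fragment above defines CC_HAVE_ASM_GOTO. */
int main(void)
{
	asm goto("" : : : : done);
	return 1;
done:
	return 0;
}

Compiling a program of this shape with something like '$(CC) -x c - -c -o /dev/null' mirrors what the probe does.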
index fe48fc7a3ebaf85c9bf8c56391bf20d9c9b90bbf..53d7f619a1b9b46643c59cd3c7d4ace354148c20 100644 (file)
@@ -158,4 +158,7 @@ config HAVE_PERF_EVENTS_NMI
          subsystem.  Also has support for calculating CPU cycle events
          to determine how many clock cycles in a given period.
 
+config HAVE_ARCH_JUMP_LABEL
+       bool
+
 source "kernel/gcov/Kconfig"
index ab1ee0ab082b996fd0d656b62baa2fcccab7b0b8..6d159cee5f2f43a48bc9aac8355069a3e588ef7f 100644 (file)
@@ -73,8 +73,6 @@
        ldq     $20, HAE_REG($19);      \
        stq     $21, HAE_CACHE($19);    \
        stq     $21, 0($20);            \
-       ldq     $0, 0($sp);             \
-       ldq     $1, 8($sp);             \
 99:;                                   \
        ldq     $19, 72($sp);           \
        ldq     $20, 80($sp);           \
@@ -316,7 +314,7 @@ ret_from_sys_call:
        cmovne  $26, 0, $19             /* $19 = 0 => non-restartable */
        ldq     $0, SP_OFF($sp)
        and     $0, 8, $0
-       beq     $0, restore_all
+       beq     $0, ret_to_kernel
 ret_to_user:
        /* Make sure need_resched and sigpending don't change between
                sampling and the rti.  */
@@ -329,6 +327,11 @@ restore_all:
        RESTORE_ALL
        call_pal PAL_rti
 
+ret_to_kernel:
+       lda     $16, 7
+       call_pal PAL_swpipl
+       br restore_all
+
        .align 3
 $syscall_error:
        /*
@@ -657,7 +660,7 @@ kernel_thread:
        /* We don't actually care for a3 success widgetry in the kernel.
           Not for positive errno values.  */
        stq     $0, 0($sp)              /* $0 */
-       br      restore_all
+       br      ret_to_kernel
 .end kernel_thread
 
 /*
@@ -911,15 +914,6 @@ sys_execve:
        jmp     $31, do_sys_execve
 .end sys_execve
 
-       .align  4
-       .globl  osf_sigprocmask
-       .ent    osf_sigprocmask
-osf_sigprocmask:
-       .prologue 0
-       mov     $sp, $18
-       jmp     $31, sys_osf_sigprocmask
-.end osf_sigprocmask
-
        .align  4
        .globl  alpha_ni_syscall
        .ent    alpha_ni_syscall
index 85d8e4f58c83ce612269162b9635bd49059c39dd..1cc49683fb69b2a5f96639e71a2f1af821479e77 100644 (file)
@@ -307,7 +307,7 @@ again:
                             new_raw_count) != prev_raw_count)
                goto again;
 
-       delta = (new_raw_count  - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+       delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
        /* It is possible on very rare occasions that the PMC has overflowed
         * but the interrupt is yet to come.  Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
                struct hw_perf_event *hwc = &pe->hw;
                int idx = hwc->idx;
 
-               if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-                       cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-                       continue;
+               if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+                       alpha_perf_event_set_period(pe, hwc, idx);
+                       cpuc->current_idx[j] = idx;
                }
 
-               alpha_perf_event_set_period(pe, hwc, idx);
-               cpuc->current_idx[j] = idx;
-               cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+               if (!(hwc->state & PERF_HES_STOPPED))
+                       cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
        }
        cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,12 +419,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
        int n0;
        int ret;
-       unsigned long flags;
+       unsigned long irq_flags;
 
        /*
         * The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@ static int alpha_pmu_enable(struct perf_event *event)
         * nevertheless we disable the PMCs first to enable a potential
         * final PMI to occur before we disable interrupts.
         */
-       perf_disable();
-       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+       local_irq_save(irq_flags);
 
        /* Default to error to be returned */
        ret = -EAGAIN;
@@ -455,8 +455,12 @@ static int alpha_pmu_enable(struct perf_event *event)
                }
        }
 
-       local_irq_restore(flags);
-       perf_enable();
+       hwc->state = PERF_HES_UPTODATE;
+       if (!(flags & PERF_EF_START))
+               hwc->state |= PERF_HES_STOPPED;
+
+       local_irq_restore(irq_flags);
+       perf_pmu_enable(event->pmu);
 
        return ret;
 }
@@ -467,15 +471,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       unsigned long flags;
+       unsigned long irq_flags;
        int j;
 
-       perf_disable();
-       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+       local_irq_save(irq_flags);
 
        for (j = 0; j < cpuc->n_events; j++) {
                if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@ static void alpha_pmu_disable(struct perf_event *event)
                }
        }
 
-       local_irq_restore(flags);
-       perf_enable();
+       local_irq_restore(irq_flags);
+       perf_pmu_enable(event->pmu);
 }
 
 
@@ -514,13 +518,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               cpuc->idx_mask &= ~(1UL<<hwc->idx);
+               hwc->state |= PERF_HES_STOPPED;
+       }
+
+       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+               alpha_perf_event_update(event, hwc, hwc->idx, 0);
+               hwc->state |= PERF_HES_UPTODATE;
+       }
+
+       if (cpuc->enabled)
+               wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+       if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+               return;
+
+       if (flags & PERF_EF_RELOAD) {
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+               alpha_perf_event_set_period(event, hwc, hwc->idx);
+       }
+
+       hwc->state = 0;
+
        cpuc->idx_mask |= 1UL<<hwc->idx;
-       wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+       if (cpuc->enabled)
+               wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -642,39 +677,36 @@ static int __hw_perf_event_init(struct perf_event *event)
        return 0;
 }
 
-static const struct pmu pmu = {
-       .enable         = alpha_pmu_enable,
-       .disable        = alpha_pmu_disable,
-       .read           = alpha_pmu_read,
-       .unthrottle     = alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
        int err;
 
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (!alpha_pmu)
-               return ERR_PTR(-ENODEV);
+               return -ENODEV;
 
        /* Do the real initialisation work. */
        err = __hw_perf_event_init(event);
 
-       if (err)
-               return ERR_PTR(err);
-
-       return &pmu;
+       return err;
 }
 
-
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -700,7 +732,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -713,6 +745,17 @@ void hw_perf_disable(void)
        wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = alpha_pmu_enable,
+       .pmu_disable    = alpha_pmu_disable,
+       .event_init     = alpha_pmu_event_init,
+       .add            = alpha_pmu_add,
+       .del            = alpha_pmu_del,
+       .start          = alpha_pmu_start,
+       .stop           = alpha_pmu_stop,
+       .read           = alpha_pmu_read,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
@@ -766,7 +809,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
        wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
        /* la_ptr is the counter that overflowed. */
-       if (unlikely(la_ptr >= perf_max_events)) {
+       if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
                /* This should never occur! */
                irq_err_count++;
                pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
                        /* Interrupts coming too quickly; "throttle" the
                         * counter, i.e., disable it for a little while.
                         */
-                       cpuc->idx_mask &= ~(1UL<<idx);
+                       alpha_pmu_stop(event, 0);
                }
        }
        wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -837,6 +880,7 @@ void __init init_hw_perf_events(void)
 
        /* And set up PMU specification */
        alpha_pmu = &ev67_pmu;
-       perf_max_events = alpha_pmu->num_pmcs;
+
+       perf_pmu_register(&pmu);
 }
 
index 842dba308eab3065510857e50312496c674c1097..3ec35066f1dc51c09367c32bea106453a109953d 100644 (file)
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
        dest[27] = pt->r27;
        dest[28] = pt->r28;
        dest[29] = pt->gp;
-       dest[30] = rdusp();
+       dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
        dest[31] = pt->pc;
 
        /* Once upon a time this was the PS value.  Which is stupid
index 0f6b51ae865aa87d70c3095cee6cec7057e0834a..6f7feb5db27193f33e24e962e4d1be257d8464ef 100644 (file)
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
 /*
  * The OSF/1 sigprocmask calling sequence is different from the
  * C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
  */
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
-               struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
 {
-       unsigned long oldmask = -EINVAL;
-
-       if ((unsigned long)how-1 <= 2) {
-               long sign = how-2;              /* -1 .. 1 */
-               unsigned long block, unblock;
-
-               newmask &= _BLOCKABLE;
-               spin_lock_irq(&current->sighand->siglock);
-               oldmask = current->blocked.sig[0];
-
-               unblock = oldmask & ~newmask;
-               block = oldmask | newmask;
-               if (!sign)
-                       block = unblock;
-               if (sign <= 0)
-                       newmask = block;
-               if (_NSIG_WORDS > 1 && sign > 0)
-                       sigemptyset(&current->blocked);
-               current->blocked.sig[0] = newmask;
-               recalc_sigpending();
-               spin_unlock_irq(&current->sighand->siglock);
-
-               regs->r0 = 0;           /* special no error return */
+       sigset_t oldmask;
+       sigset_t mask;
+       unsigned long res;
+
+       siginitset(&mask, newmask & _BLOCKABLE);
+       res = sigprocmask(how, &mask, &oldmask);
+       if (!res) {
+               force_successful_syscall_return();
+               res = oldmask.sig[0];
        }
-       return oldmask;
+       return res;
 }
 
 SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
                old_sigset_t mask;
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
                        return -EFAULT;
-               __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
                new_ka.ka_restorer = NULL;
        }
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }
 
        return ret;
index ce594ef533cc5b3d410b3403a8ca0467e3ebe47f..a6a1de9db16fd6e57d96c4e0b1f76db70f27147f 100644 (file)
@@ -58,7 +58,7 @@ sys_call_table:
        .quad sys_open                          /* 45 */
        .quad alpha_ni_syscall
        .quad sys_getxgid
-       .quad osf_sigprocmask
+       .quad sys_osf_sigprocmask
        .quad alpha_ni_syscall
        .quad alpha_ni_syscall                  /* 50 */
        .quad sys_acct
index 553b7cf17bfb0bac057eaedf0402a0bca2aa47e8..88c97bc7a6f5b7a751b0b628e75a17f4604f82f3 100644 (file)
@@ -271,7 +271,6 @@ config ARCH_AT91
        bool "Atmel AT91"
        select ARCH_REQUIRE_GPIOLIB
        select HAVE_CLK
-       select ARCH_USES_GETTIMEOFFSET
        help
          This enables support for systems based on the Atmel AT91RM9200,
          AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
          ACTLR register. Note that setting specific bits in the ACTLR register
          may not be available in non-secure mode.
 
+config ARM_ERRATA_742230
+       bool "ARM errata: DMB operation may be faulty"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 742230 Cortex-A9
+         (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
+         between two write operations may not ensure the correct visibility
+         ordering of the two writes. This workaround sets a specific bit in
+         the diagnostic register of the Cortex-A9 which causes the DMB
+         instruction to behave as a DSB, ensuring the correct behaviour of
+         the two writes.
+
+config ARM_ERRATA_742231
+       bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 742231 Cortex-A9
+         (r2p0..r2p2) erratum. Under certain conditions, specific to the
+         Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
+         accessing some data located in the same cache line, may get corrupted
+         data due to bad handling of the address hazard when the line gets
+         replaced from one of the CPUs at the same time as another CPU is
+         accessing it. This workaround sets specific bits in the diagnostic
+         register of the Cortex-A9 which reduces the linefill issuing
+         capabilities of the processor.
+
 config PL310_ERRATA_588369
        bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
        depends on CACHE_L2X0 && ARCH_OMAP4
index b23f6bc46cfa1dd1029bb53dc7009c3c790f1088..65a7c1c588a94ab4623be0ddfe02a691fb2954c6 100644 (file)
@@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
        $(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
        @sed "$(SEDFLAGS)" < $< > $@
index 7974baacafcea74ec055a46ee0f6cea496f24e6f..1bec96e851967101df7a796745b84d24bd320ab8 100644 (file)
@@ -271,6 +271,14 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
                ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+       if (mask >= PHYS_OFFSET + SZ_64M - 1)
+               return 0;
+
+       return -EIO;
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
        it8152_io.start = IT8152_IO_BASE + 0x12000;
index ab68cf1ef80fe7ccad28bfdd5d5084bb6f1b61dc..e90b167ea8484002fffb0121e7a2d61726851fe6 100644 (file)
@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                    unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
index 1b560825e1cfb83877500152e2e2b5d4ec6553a3..7885722bdf4eff7115137ac46444c8d176f05b57 100644 (file)
@@ -48,6 +48,8 @@ work_pending:
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
+       tst     r1, #_TIF_SIGPENDING            @ delivering a signal?
+       movne   why, #0                         @ prevent further restarts
        bl      do_notify_resume
        b       ret_slow_syscall                @ Check work again
 
index ef3bc331518fde8b37a91df7cc3085fa768fed7a..6cc6521881aa5657684242c95345d61d75224e4e 100644 (file)
@@ -227,46 +227,56 @@ again:
 }
 
 static void
-armpmu_disable(struct perf_event *event)
+armpmu_read(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-
-       WARN_ON(idx < 0);
-
-       clear_bit(idx, cpuc->active_mask);
-       armpmu->disable(hwc, idx);
-
-       barrier();
 
-       armpmu_event_update(event, hwc, idx);
-       cpuc->events[idx] = NULL;
-       clear_bit(idx, cpuc->used_mask);
+       /* Don't read disabled counters! */
+       if (hwc->idx < 0)
+               return;
 
-       perf_event_update_userpage(event);
+       armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
-armpmu_read(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
 
-       /* Don't read disabled counters! */
-       if (hwc->idx < 0)
+       if (!armpmu)
                return;
 
-       armpmu_event_update(event, hwc, hwc->idx);
+       /*
+        * ARM pmu always has to update the counter, so ignore
+        * PERF_EF_UPDATE, see comments in armpmu_start().
+        */
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               armpmu->disable(hwc, hwc->idx);
+               barrier(); /* why? */
+               armpmu_event_update(event, hwc, hwc->idx);
+               hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       }
 }
 
 static void
-armpmu_unthrottle(struct perf_event *event)
+armpmu_start(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
 
+       if (!armpmu)
+               return;
+
+       /*
+        * ARM pmu always has to reprogram the period, so ignore
+        * PERF_EF_RELOAD, see the comment below.
+        */
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+       hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
-        * were throttled we simply disabled the IRQ source and the counter
+        * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
@@ -275,14 +285,33 @@ armpmu_unthrottle(struct perf_event *event)
        armpmu->enable(hwc, hwc->idx);
 }
 
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
+       WARN_ON(idx < 0);
+
+       clear_bit(idx, cpuc->active_mask);
+       armpmu_stop(event, PERF_EF_UPDATE);
+       cpuc->events[idx] = NULL;
+       clear_bit(idx, cpuc->used_mask);
+
+       perf_event_update_userpage(event);
+}
+
 static int
-armpmu_enable(struct perf_event *event)
+armpmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;
 
+       perf_pmu_disable(event->pmu);
+
        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
        if (idx < 0) {
@@ -299,25 +328,19 @@ armpmu_enable(struct perf_event *event)
        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);
 
-       /* Set the period for the event. */
-       armpmu_event_set_period(event, hwc, idx);
-
-       /* Enable the event. */
-       armpmu->enable(hwc, idx);
+       hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       if (flags & PERF_EF_START)
+               armpmu_start(event, PERF_EF_RELOAD);
 
        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);
 
 out:
+       perf_pmu_enable(event->pmu);
        return err;
 }
 
-static struct pmu pmu = {
-       .enable     = armpmu_enable,
-       .disable    = armpmu_disable,
-       .unthrottle = armpmu_unthrottle,
-       .read       = armpmu_read,
-};
+static struct pmu pmu;
 
 static int
 validate_event(struct cpu_hw_events *cpuc,
@@ -497,20 +520,29 @@ __hw_perf_event_init(struct perf_event *event)
        return err;
 }
 
-const struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
 {
        int err = 0;
 
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (!armpmu)
-               return ERR_PTR(-ENODEV);
+               return -ENODEV;
 
        event->destroy = hw_perf_event_destroy;
 
        if (!atomic_inc_not_zero(&active_events)) {
-               if (atomic_read(&active_events) > perf_max_events) {
+               if (atomic_read(&active_events) > armpmu->num_events) {
                        atomic_dec(&active_events);
-                       return ERR_PTR(-ENOSPC);
+                       return -ENOSPC;
                }
 
                mutex_lock(&pmu_reserve_mutex);
@@ -524,17 +556,16 @@ hw_perf_event_init(struct perf_event *event)
        }
 
        if (err)
-               return ERR_PTR(err);
+               return err;
 
        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);
 
-       return err ? ERR_PTR(err) : &pmu;
+       return err;
 }
 
-void
-hw_perf_enable(void)
+static void armpmu_enable(struct pmu *pmu)
 {
        /* Enable all of the perf events on hardware. */
        int idx;
@@ -555,13 +586,23 @@ hw_perf_enable(void)
        armpmu->start();
 }
 
-void
-hw_perf_disable(void)
+static void armpmu_disable(struct pmu *pmu)
 {
        if (armpmu)
                armpmu->stop();
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = armpmu_enable,
+       .pmu_disable    = armpmu_disable,
+       .event_init     = armpmu_event_init,
+       .add            = armpmu_add,
+       .del            = armpmu_del,
+       .start          = armpmu_start,
+       .stop           = armpmu_stop,
+       .read           = armpmu_read,
+};
+
 /*
  * ARMv6 Performance counter handling code.
  *
@@ -2939,14 +2980,12 @@ init_hw_perf_events(void)
                        armpmu = &armv6pmu;
                        memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
                                        sizeof(armv6_perf_cache_map));
-                       perf_max_events = armv6pmu.num_events;
                        break;
                case 0xB020:    /* ARM11mpcore */
                        armpmu = &armv6mpcore_pmu;
                        memcpy(armpmu_perf_cache_map,
                               armv6mpcore_perf_cache_map,
                               sizeof(armv6mpcore_perf_cache_map));
-                       perf_max_events = armv6mpcore_pmu.num_events;
                        break;
                case 0xC080:    /* Cortex-A8 */
                        armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2958,7 +2997,6 @@ init_hw_perf_events(void)
                        /* Reset PMNC and read the nb of CNTx counters
                            supported */
                        armv7pmu.num_events = armv7_reset_read_pmnc();
-                       perf_max_events = armv7pmu.num_events;
                        break;
                case 0xC090:    /* Cortex-A9 */
                        armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -2970,7 +3008,6 @@ init_hw_perf_events(void)
                        /* Reset PMNC and read the nb of CNTx counters
                            supported */
                        armv7pmu.num_events = armv7_reset_read_pmnc();
-                       perf_max_events = armv7pmu.num_events;
                        break;
                }
        /* Intel CPUs [xscale]. */
@@ -2981,13 +3018,11 @@ init_hw_perf_events(void)
                        armpmu = &xscale1pmu;
                        memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                        sizeof(xscale_perf_cache_map));
-                       perf_max_events = xscale1pmu.num_events;
                        break;
                case 2:
                        armpmu = &xscale2pmu;
                        memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                        sizeof(xscale_perf_cache_map));
-                       perf_max_events = xscale2pmu.num_events;
                        break;
                }
        }
@@ -2997,9 +3032,10 @@ init_hw_perf_events(void)
                                arm_pmu_names[armpmu->id], armpmu->num_events);
        } else {
                pr_info("no hardware support available\n");
-               perf_max_events = -1;
        }
 
+       perf_pmu_register(&pmu);
+
        return 0;
 }
 arch_initcall(init_hw_perf_events);
@@ -3007,13 +3043,6 @@ arch_initcall(init_hw_perf_events);
 /*
  * Callchain handling code.
  */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-               u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 /*
  * The registers we're interested in are at the end of the variable
@@ -3045,7 +3074,7 @@ user_backtrace(struct frame_tail *tail,
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;
 
-       callchain_store(entry, buftail.lr);
+       perf_callchain_store(entry, buftail.lr);
 
        /*
         * Frame pointers should strictly progress back up the stack
@@ -3057,16 +3086,11 @@ user_backtrace(struct frame_tail *tail,
        return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-                   struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct frame_tail *tail;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-
-       if (!user_mode(regs))
-               regs = task_pt_regs(current);
 
        tail = (struct frame_tail *)regs->ARM_fp - 1;
 
@@ -3084,56 +3108,18 @@ callchain_trace(struct stackframe *fr,
                void *data)
 {
        struct perf_callchain_entry *entry = data;
-       callchain_store(entry, fr->pc);
+       perf_callchain_store(entry, fr->pc);
        return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-                     struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct stackframe fr;
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-                 struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (!current || !current->pid)
-               return;
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-
-       if (current->mm)
-               perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-       entry->nr = 0;
-       perf_do_callchain(regs, entry);
-       return entry;
-}
index 5e71ccd5e7d381ca211d1f73653351bdca814c6b..1276babf84d540ad253d06eb089f29d57beab1a7 100644 (file)
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
        .sda_is_open_drain      = 1,
        .scl_pin                = AT91_PIN_PA21,
        .scl_is_open_drain      = 1,
-       .udelay                 = 2,            /* ~100 kHz */
+       .udelay                 = 5,            /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
        .sda_is_open_drain      = 1,
        .scl_pin                = AT91_PIN_PB11,
        .scl_is_open_drain      = 1,
-       .udelay                 = 2,            /* ~100 kHz */
+       .udelay                 = 5,            /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi1_device = {
index 3d996b659ff41e43d794d93c167b409afb90c355..9be261beae7ddb2a72e2e9a76d9dbf2e2bf51316 100644 (file)
@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index 6b6f4c643709c7d14a4b2baf168464ad772be4ce..7781e35daec3d0a7cf9e956aceb7e717f48c7c44 100644 (file)
@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index 40fec315c99a192826d7530d43c6406726ecbcff..5e5b0a7831fbf2b7af156ba76bf4af0b6be26489 100644 (file)
@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00008000),
                .length         = SZ_16K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index e4a3df1872aca89404899034ebbe920d453e7f66..26e8a9c7f50b4393f1e716575950fff967658b9a 100644 (file)
@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index 3b3e4721ce2ea0fb9236803e566d4c67b0d5638e..eb4936ff90ad9c42b283fc6c354eae1f01c6c4c7 100644 (file)
@@ -13,8 +13,8 @@
 
 #define IO_SPACE_LIMIT         0xffffffff
 
-#define __io(a)  ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
-                                  DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a)           (a)
+#define __io(a)        ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
+                                                DOVE_PCIE0_IO_VIRT_BASE))
+#define __mem_pci(a)   (a)
 
 #endif
index 61cd4d64b98596c7507dcaf67d069dbe22508e50..24498a932ba65b8054de5ba8229ef0e0c838dd71 100644 (file)
@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
        return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+       if (mask >= SZ_64M - 1)
+               return 0;
+
+       return -EIO;
+}
+
 EXPORT_SYMBOL(ixp4xx_pci_read);
 EXPORT_SYMBOL(ixp4xx_pci_write);
 
index f91ca6d4fbe8a2820758d2252091a761caac377c..8138371c406e6584b8990803079c7a093cbe1964 100644 (file)
@@ -26,6 +26,8 @@
 #define PCIBIOS_MAX_MEM                0x4BFFFFFF
 #endif
 
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
+
 #define pcibios_assign_all_busses()    1
 
 /* Register locations and bits */
index 93fc2ec95e7687b30b6144f2116adb0664266a4d..6e924b398919e822ca4f2bddd294358bf4008ede 100644 (file)
@@ -38,7 +38,7 @@
 
 #define KIRKWOOD_PCIE1_IO_PHYS_BASE    0xf3000000
 #define KIRKWOOD_PCIE1_IO_VIRT_BASE    0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE     0x00000000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE     0x00100000
 #define KIRKWOOD_PCIE1_IO_SIZE         SZ_1M
 
 #define KIRKWOOD_PCIE_IO_PHYS_BASE     0xf2000000
index 55e7f00836b7cdba68a9662729119e977904a2dd..513ad3102d7c192d2fdc00229b6cdb527b002d87 100644 (file)
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
         * IORESOURCE_IO
         */
        pp->res[0].name = "PCIe 0 I/O Space";
-       pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
+       pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
        pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
        pp->res[0].flags = IORESOURCE_IO;
 
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
         * IORESOURCE_IO
         */
        pp->res[0].name = "PCIe 1 I/O Space";
-       pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
+       pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
        pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
        pp->res[0].flags = IORESOURCE_IO;
 
index 4f5b0e0ce6cf8f87e0e843b82ce2f8b0b4cbc34c..1a8a25edb1b422ace6925e07171953390b51bc11 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef __ASM_MACH_SYSTEM_H
 #define __ASM_MACH_SYSTEM_H
 
+#include <mach/cputype.h>
+
 static inline void arch_idle(void)
 {
        cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-       cpu_reset(0);
+       if (cpu_is_pxa168())
+               cpu_reset(0xffff0000);
+       else
+               cpu_reset(0);
 }
 #endif /* __ASM_MACH_SYSTEM_H */
index 50d5939a78f1bdc8f318f08360243626a14a6105..58093d9e07be44ea482df319c88acb78c0db3975 100644 (file)
@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        freqs.cpu = policy->cpu;
 
        if (freq_debug)
-               pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
-                        "(SDRAM %d Mhz)\n",
+               pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
                         freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
                         (new_freq_mem / 2000) : (new_freq_mem / 1000));
 
index 7f64d24cd5648df0790c6ec570322195d92359e5..814f1458a06a52ec5af6b37643ca4b6db602dd55 100644 (file)
  * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
  * == 0x3 for pxa300/pxa310/pxa320
  */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
 #define __cpu_is_pxa2xx(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 13 & 0x7;    \
                _id <= 0x2;                             \
         })
+#else
+#define __cpu_is_pxa2xx(id)    (0)
+#endif
 
+#ifdef CONFIG_PXA3xx
 #define __cpu_is_pxa3xx(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 13 & 0x7;    \
                _id == 0x3;                             \
         })
+#else
+#define __cpu_is_pxa3xx(id)    (0)
+#endif
 
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
 #define __cpu_is_pxa93x(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 4 & 0xfff;   \
                _id == 0x683 || _id == 0x693;           \
         })
+#else
+#define __cpu_is_pxa93x(id)    (0)
+#endif
 
 #define cpu_is_pxa2xx()                                        \
        ({                                              \
@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
 #define PCIBIOS_MIN_IO         0
 #define PCIBIOS_MIN_MEM                0
 #define pcibios_assign_all_busses()    1
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
 #endif
 
-
 #endif  /* _ASM_ARCH_HARDWARE_H */
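
With the #ifdef guards added above, __cpu_is_pxa2xx(), __cpu_is_pxa3xx() and __cpu_is_pxa93x() collapse to the constant 0 whenever the matching CONFIG option is off, so callers cost nothing on builds that cannot run on that silicon. A hedged illustration (the two helpers are hypothetical, for illustration only):

#include <mach/hardware.h>

static void pxa3xx_specific_setup(void) { }	/* hypothetical helper */
static void generic_setup(void) { }		/* hypothetical helper */

static void example_board_setup(void)
{
	/*
	 * On a kernel built without CONFIG_PXA3xx, cpu_is_pxa3xx()
	 * expands to a constant 0 via __cpu_is_pxa3xx() above, so the
	 * compiler discards the first branch entirely.
	 */
	if (cpu_is_pxa3xx())
		pxa3xx_specific_setup();
	else
		generic_setup();
}
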
index 262691fb97d86c2ce36de96d7e180a35b4906298..fdca3be47d9bb1fdaa70d8b7b9c112c34b32c495 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <mach/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
index 77ad6d34ab5bc9cd0eb5ce2917a461ce8904c55c..405b92a29793d80fa771546418d78b4bec7094cd 100644 (file)
@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
        },
 };
 
+static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
+       .use_pio        = 1,
+};
+
 void __init palm27x_pmic_init(void)
 {
        i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
-       pxa27x_set_i2c_power_info(NULL);
+       pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
 }
 #endif
index c9b747cedea8fb48a4a9615d52394fa5d2e77fcc..37d6173bbb660868a3cb696cedcaab97b9eee66d 100644 (file)
@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
 static struct pxamci_platform_data vpac270_mci_platform_data = {
        .ocr_mask               = MMC_VDD_32_33 | MMC_VDD_33_34,
+       .gpio_power             = -1,
        .gpio_card_detect       = GPIO53_VPAC270_SD_DETECT_N,
        .gpio_card_ro           = GPIO52_VPAC270_SD_READONLY,
        .detect_delay_ms        = 200,
index 7b1fc984abb64c85dd3124eba6b6b108000835b9..d5a71abcbaeaf5a8da0255e50e89f5982fd447d4 100644 (file)
@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
 extern int gpio_get_value(unsigned gpio);
 extern void gpio_set_value(unsigned gpio, int value);
 
+#define gpio_get_value_cansleep gpio_get_value
+#define gpio_set_value_cansleep gpio_set_value
+
 /* wrappers to sleep-enable the previous two functions */
 static inline unsigned gpio_to_irq(unsigned gpio)
 {
index 577df6cccb0891503bf0188f3c0f33103c159000..efb127022d42facb807b4cb54220d3299fe24e04 100644 (file)
@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
        int i;
 
 #ifdef CONFIG_CACHE_L2X0
-       l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+       void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
+
+       /* set RAM latencies to 1 cycle for this core tile. */
+       writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+       writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+       l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
index d073b64ae87ec4f6652c67959244292dbb3e69ad..724ba3bce72c952ff44645d2a50e5566b568c943 100644 (file)
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        if (ai_usermode & UM_SIGNAL)
                force_sig(SIGBUS, current);
-       else
-               set_cr(cr_no_alignment);
+       else {
+               /*
+                * We're about to disable the alignment trap and return to
+                * user space.  But if an interrupt occurs before actually
+                * reaching user space, then the IRQ vector entry code will
+                * notice that we were still in kernel space and therefore
+                * the alignment trap won't be re-enabled in that case as it
+                * is presumed to be always on from kernel space.
+                * Let's prevent that race by disabling interrupts here (they
+                * are disabled on the way back to user space anyway in
+                * entry-common.S) and disabling the alignment trap only if
+                * there is no work pending for this thread.
+                */
+               raw_local_irq_disable();
+               if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+                       set_cr(cr_no_alignment);
+       }
 
        return 0;
 }
index 6e1c4f6a2b3f3a09ed3f10be9aeafab36c9a0924..6a3a2d0cd6db15806342a7357c2c154b6ab5970d 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/fs.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_USER | L_PTE_EXEC,
+               .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_NONCACHED] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+               .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
         * Enable CPU-specific coherency if supported.
         * (Only available on XSC3 at the moment.)
         */
-       if (arch_is_coherent() && cpu_is_xsc3())
+       if (arch_is_coherent() && cpu_is_xsc3()) {
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+       }
        /*
         * ARMv6 and above have extended page tables.
         */
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
                mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
                mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 #endif
        }
 
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
 
        switch (cp->pmd) {
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
        }
 }
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                             unsigned long size, pgprot_t vma_prot)
+{
+       if (!pfn_valid(pfn))
+               return pgprot_noncached(vma_prot);
+       else if (file->f_flags & O_SYNC)
+               return pgprot_writecombine(vma_prot);
+       return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
 
 static void __init *early_alloc(unsigned long sz)
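
The phys_mem_access_prot() added above decides how /dev/mem mappings are cached: pages outside RAM fail pfn_valid() and are mapped non-cached, RAM mapped through a descriptor opened with O_SYNC becomes write-combining, and everything else keeps the default attributes. A rough userspace illustration (not part of the patch):

#include <stddef.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>

void *map_device_page(off_t phys)
{
	/*
	 * A non-RAM physical address ends up non-cached; a RAM page
	 * mapped via O_SYNC would become write-combining instead.
	 */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);

	if (fd < 0)
		return NULL;
	/* fd intentionally stays open for the mapping's lifetime here */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
}
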
index 6a8506d99ee9abbb0845f39d94d663f802c56885..7563ff0141bd85cee6d4cc626b69f7210141094c 100644 (file)
@@ -186,13 +186,14 @@ cpu_v7_name:
  *     It is assumed that:
  *     - cache type register is implemented
  */
-__v7_setup:
+__v7_ca9mp_setup:
 #ifdef CONFIG_SMP
        mrc     p15, 0, r0, c1, c0, 1
        tst     r0, #(1 << 6)                   @ SMP/nAMP mode enabled?
        orreq   r0, r0, #(1 << 6) | (1 << 0)    @ Enable SMP/nAMP mode and
        mcreq   p15, 0, r0, c1, c0, 1           @ TLB ops broadcasting
 #endif
+__v7_setup:
        adr     r12, __v7_setup_stack           @ the local stack
        stmia   r12, {r0-r5, r7, r9, r11, lr}
        bl      v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
        mrc     p15, 0, r0, c0, c0, 0           @ read main ID register
        and     r10, r0, #0xff000000            @ ARM?
        teq     r10, #0x41000000
-       bne     2f
+       bne     3f
        and     r5, r0, #0x00f00000             @ variant
        and     r6, r0, #0x0000000f             @ revision
-       orr     r0, r6, r5, lsr #20-4           @ combine variant and revision
+       orr     r6, r6, r5, lsr #20-4           @ combine variant and revision
+       ubfx    r0, r0, #4, #12                 @ primary part number
 
+       /* Cortex-A8 Errata */
+       ldr     r10, =0x00000c08                @ Cortex-A8 primary part number
+       teq     r0, r10
+       bne     2f
 #ifdef CONFIG_ARM_ERRATA_430973
        teq     r5, #0x00100000                 @ only present in r1p*
        mrceq   p15, 0, r10, c1, c0, 1          @ read aux control register
@@ -213,21 +219,42 @@ __v7_setup:
        mcreq   p15, 0, r10, c1, c0, 1          @ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_458693
-       teq     r0, #0x20                       @ only present in r2p0
+       teq     r6, #0x20                       @ only present in r2p0
        mrceq   p15, 0, r10, c1, c0, 1          @ read aux control register
        orreq   r10, r10, #(1 << 5)             @ set L1NEON to 1
        orreq   r10, r10, #(1 << 9)             @ set PLDNOP to 1
        mcreq   p15, 0, r10, c1, c0, 1          @ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_460075
-       teq     r0, #0x20                       @ only present in r2p0
+       teq     r6, #0x20                       @ only present in r2p0
        mrceq   p15, 1, r10, c9, c0, 2          @ read L2 cache aux ctrl register
        tsteq   r10, #1 << 22
        orreq   r10, r10, #(1 << 22)            @ set the Write Allocate disable bit
        mcreq   p15, 1, r10, c9, c0, 2          @ write the L2 cache aux ctrl register
 #endif
+       b       3f
+
+       /* Cortex-A9 Errata */
+2:     ldr     r10, =0x00000c09                @ Cortex-A9 primary part number
+       teq     r0, r10
+       bne     3f
+#ifdef CONFIG_ARM_ERRATA_742230
+       cmp     r6, #0x22                       @ only present up to r2p2
+       mrcle   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orrle   r10, r10, #1 << 4               @ set bit #4
+       mcrle   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+       teq     r6, #0x20                       @ present in r2p0
+       teqne   r6, #0x21                       @ present in r2p1
+       teqne   r6, #0x22                       @ present in r2p2
+       mrceq   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orreq   r10, r10, #1 << 12              @ set bit #12
+       orreq   r10, r10, #1 << 22              @ set bit #22
+       mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
 
-2:     mov     r10, #0
+3:     mov     r10, #0
 #ifdef HARVARD_CACHE
        mcr     p15, 0, r10, c7, c5, 0          @ I+BTB cache invalidate
 #endif
@@ -323,6 +350,29 @@ cpu_elf_name:
 
        .section ".proc.info.init", #alloc, #execinstr
 
+       .type   __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+       .long   0x410fc090              @ Required ID value
+       .long   0xff0ffff0              @ Mask for ID
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ | \
+               PMD_FLAGS
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_XN | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
+       b       __v7_ca9mp_setup
+       .long   cpu_arch_name
+       .long   cpu_elf_name
+       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+       .long   cpu_v7_name
+       .long   v7_processor_functions
+       .long   v7wbi_tlb_fns
+       .long   v6_user_fns
+       .long   v7_cache_fns
+       .size   __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
        /*
         * Match any ARMv7 processor core.
         */
index ea3ca86c52836ba1ec9fe780da9c1629db91723f..aedf9c1d645e4a820c8f1f9dd884fdf5fd1cebf5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/arm/mach-nomadik/timer.c
+ *  linux/arch/arm/plat-nomadik/timer.c
  *
  * Copyright (C) 2008 STMicroelectronics
  * Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
                cr = readl(mtu_base + MTU_CR(1));
                writel(0, mtu_base + MTU_LR(1));
                writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
-               writel(0x2, mtu_base + MTU_IMSC);
+               writel(1 << 1, mtu_base + MTU_IMSC);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
 {
        unsigned long rate;
        struct clk *clk0;
-       struct clk *clk1;
-       u32 cr;
+       u32 cr = MTU_CRn_32BITS;
 
        clk0 = clk_get_sys("mtu0", NULL);
        BUG_ON(IS_ERR(clk0));
 
-       clk1 = clk_get_sys("mtu1", NULL);
-       BUG_ON(IS_ERR(clk1));
-
        clk_enable(clk0);
-       clk_enable(clk1);
 
        /*
-        * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
-        * use a divide-by-16 counter if it's more than 16MHz
+        * Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or 133 MHz
+        * for ux500.
+        * Use a divide-by-16 prescaler if the tick rate is more than 32 MHz.
+        * At 32 MHz, the timer (with a 32-bit counter) can be programmed
+        * to wake up at most 127 s ahead in time. Dividing a 2.4 MHz timer
+        * by 16 gives too low a timer resolution.
         */
-       cr = MTU_CRn_32BITS;;
        rate = clk_get_rate(clk0);
-       if (rate > 16 << 20) {
+       if (rate > 32000000) {
                rate /= 16;
                cr |= MTU_CRn_PRESCALE_16;
        } else {
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
                pr_err("timer: failed to initialize clock source %s\n",
                       nmdk_clksrc.name);
 
-       /* Timer 1 is used for events, fix according to rate */
-       cr = MTU_CRn_32BITS;
-       rate = clk_get_rate(clk1);
-       if (rate > 16 << 20) {
-               rate /= 16;
-               cr |= MTU_CRn_PRESCALE_16;
-       } else {
-               cr |= MTU_CRn_PRESCALE_1;
-       }
+       /* Timer 1 is used for events */
+
        clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
 
        writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
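
The comment above trades range against resolution: a 32-bit one-shot counter covers roughly 2^32/rate seconds. A throwaway check of those numbers (illustrative only, plain userspace C):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_ticks = 0xffffffffULL;

	/*
	 * A bit over two minutes of raw range at 32 MHz, the same order
	 * as the ~127 s quoted above, versus roughly 1790 s at 2.4 MHz,
	 * where the /16 prescaler would only cost resolution.
	 */
	printf("32 MHz : %llu s\n", (unsigned long long)(max_ticks / 32000000));
	printf("2.4 MHz: %llu s\n", (unsigned long long)(max_ticks / 2400000));
	return 0;
}
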
index e39a417a368dc92ad6776a68ef6fefc22a046b76..a92cb499313fdc9583890ebcc182ecae280cdc09 100644 (file)
@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES
 config OMAP_DEBUG_LEDS
        bool
        depends on OMAP_DEBUG_DEVICES
-       default y if LEDS
+       default y if LEDS_CLASS
 
 config OMAP_RESET_CLOCKS
        bool "Reset unused clocks during boot"
index e31496e35b0f452d4ff9e375855718fc0a078d40..0c8612fd831237164968b1f2120a1134618557e3 100644 (file)
@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
                /* Writing zero to RSYNC_ERR clears the IRQ */
                MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
        } else {
-               complete(&mcbsp_rx->tx_irq_completion);
+               complete(&mcbsp_rx->rx_irq_completion);
        }
 
        return IRQ_HANDLED;
index 226b2e858d6c9617243a91138821fb0e355daf56..10b3b4c63372f406e6ee206ce691977753017e56 100644 (file)
@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
        if (omap_sram_size == 0)
                return;
 
-       if (cpu_is_omap24xx()) {
-               omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
-
-               base = OMAP2_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-       }
-
        if (cpu_is_omap34xx()) {
-               omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
-               base = OMAP3_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-
                /*
                 * SRAM must be marked as non-cached on OMAP3 since the
                 * CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
                omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
        }
 
-       if (cpu_is_omap44xx()) {
-               omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
-               base = OMAP4_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-       }
-       omap_sram_io_desc[0].length = 1024 * 1024;      /* Use section desc */
+       omap_sram_io_desc[0].virtual = omap_sram_base;
+       base = omap_sram_start;
+       base = ROUND_DOWN(base, PAGE_SIZE);
+       omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+       omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
        iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
 
        printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
index 98f94d041d9c1dd212a0519efac60def8034b7db..a727f54d64d6e633d58ae2836bbbfae4c82f0017 100644 (file)
@@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
        vfree(module->arch.syminfo);
        module->arch.syminfo = NULL;
 
-       return module_bug_finalize(hdr, sechdrs, module);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *module)
 {
-       module_bug_cleanup(module);
 }
index 0865e291c20d2948c95edc70f52925121409599d..db4953dc4e1b445adbdd7e004a68890a5c1c07a6 100644 (file)
@@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 9c1acb2b1a928984c8f62ce4a5cd91a25329dc21..b2eeb0de1c8d337a6d7ab5abef363b4797ab7e9a 100644 (file)
@@ -157,7 +157,6 @@ typedef struct sigaltstack {
 #undef __HAVE_ARCH_SIG_BITOPS
 
 struct pt_regs;
-extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
 
 #define ptrace_signal_deliver(regs, cookie)    do { } while (0)
 
index 76125777483ccda07d9d31d55c2f8f3b3201ea05..c70545689da83ef2ffaef987b2c3374fadca04fc 100644 (file)
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 #define __IGNORE_lchown
 #define __IGNORE_setuid
index 403869833b98fe6c95fd360d80cd762f3a5008e2..225412bc227e690bcb313743dd16f58fca5c4115 100644 (file)
@@ -235,10 +235,9 @@ work_resched:
 work_notifysig:                                ; deal with pending signals and
                                        ; notify-resume requests
        mv      r0, sp                  ; arg1 : struct pt_regs *regs
-       ldi     r1, #0                  ; arg2 : sigset_t *oldset
-       mv      r2, r9                  ; arg3 : __u32 thread_info_flags
+       mv      r1, r9                  ; arg2 : __u32 thread_info_flags
        bl      do_notify_resume
-       bra     restore_all
+       bra     resume_userspace
 
        ; perform syscall exit tracing
        ALIGN
index e555091eb97cbcf8bbe261851be3b72e90e6075e..0021ade4cba8c86bf1d2fd348b283d8cac591955 100644 (file)
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
 
        if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
            != sizeof(insn))
-               break;
+               return -EIO;
 
        compute_next_pc(insn, pc, &next_pc, child);
        if (next_pc & 0x80000000)
-               break;
+               return -EIO;
 
        if (embed_debug_trap(child, next_pc))
-               break;
+               return -EIO;
 
        invalidate_cache();
+       return 0;
 }
 
 void user_disable_single_step(struct task_struct *child)
index 144b0f124fc72f08b20f93336f96da81327fe61c..7bbe38645ed5559395f85c5b5fce93eb3f4992e3 100644 (file)
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
-                 unsigned long r2, unsigned long r3, unsigned long r4,
-                 unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
-       sigset_t newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, unewset, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
-       spin_lock_irq(&current->sighand->siglock);
-       current->saved_sigmask = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       current->state = TASK_INTERRUPTIBLE;
-       schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
-       return -ERESTARTNOHAND;
-}
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
        return (void __user *)((sp - frame_size) & -8ul);
 }
 
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                           sigset_t *set, struct pt_regs *regs)
 {
        struct rt_sigframe __user *frame;
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                current->comm, current->pid, frame, regs->pc);
 #endif
 
-       return;
+       return 0;
 
 give_sigsegv:
        force_sigsegv(sig, current);
+       return -EFAULT;
+}
+
+static int prev_insn(struct pt_regs *regs)
+{
+       u16 inst;
+       if (get_user(&inst, (u16 __user *)(regs->bpc - 2)))
+               return -EFAULT;
+       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
+               regs->bpc -= 2;
+       else
+               regs->bpc -= 4;
+       regs->syscall_nr = -1;
+       return 0;
 }
 
 /*
  * OK, we're invoking a handler
  */
 
-static void
+static int
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
              sigset_t *oldset, struct pt_regs *regs)
 {
-       unsigned short inst;
-
        /* Are we from a system call? */
        if (regs->syscall_nr >= 0) {
                /* If so, check system call restarting.. */
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                        /* fallthrough */
                        case -ERESTARTNOINTR:
                                regs->r0 = regs->orig_r0;
-                               inst = *(unsigned short *)(regs->bpc - 2);
-                               if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                                       regs->bpc -= 2;
-                               else
-                                       regs->bpc -= 4;
+                               if (prev_insn(regs) < 0)
+                                       return -EFAULT;
                }
        }
 
        /* Set up the stack frame */
-       setup_rt_frame(sig, ka, info, oldset, regs);
+       if (setup_rt_frame(sig, ka, info, oldset, regs))
+               return -EFAULT;
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                sigaddset(&current->blocked,sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+       return 0;
 }
 
 /*
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
 {
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       unsigned short inst;
+       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
         * if so.
         */
        if (!user_mode(regs))
-               return 1;
+               return;
 
        if (try_to_freeze()) 
                goto no_signal;
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
                oldset = &current->blocked;
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
                 */
 
                /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &ka, &info, oldset, regs);
-               return 1;
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+               return;
        }
 
  no_signal:
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
                    regs->r0 == -ERESTARTSYS ||
                    regs->r0 == -ERESTARTNOINTR) {
                        regs->r0 = regs->orig_r0;
-                       inst = *(unsigned short *)(regs->bpc - 2);
-                       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                               regs->bpc -= 2;
-                       else
-                               regs->bpc -= 4;
-               }
-               if (regs->r0 == -ERESTART_RESTARTBLOCK){
+                       prev_insn(regs);
+               } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
                        regs->r0 = regs->orig_r0;
                        regs->r7 = __NR_restart_syscall;
-                       inst = *(unsigned short *)(regs->bpc - 2);
-                       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                               regs->bpc -= 2;
-                       else
-                               regs->bpc -= 4;
+                       prev_insn(regs);
                }
        }
-       return 0;
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 }
 
 /*
  * notification of userspace execution resumption
  * - triggered by current->work.notify_resume
  */
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
-                     __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
 {
        /* Pending single-step? */
        if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
 
        /* deal with pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(regs,oldset);
+               do_signal(regs);
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
index 8f0640847ad2bf7bf99d0a184ed10ce8272a84f8..05285d08e54767a71a814773c23a506386f9626a 100644 (file)
@@ -162,7 +162,7 @@ static void mac_init_asc( void )
 void mac_mksound( unsigned int freq, unsigned int length )
 {
        __u32 cfreq = ( freq << 5 ) / 468;
-       __u32 flags;
+       unsigned long flags;
        int i;
 
        if ( mac_special_bell == NULL )
@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored )
  */
 static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
 {
-       __u32 flags;
+       unsigned long flags;
 
        /* if the bell is already ringing, ring longer */
        if ( mac_bell_duration > 0 )
@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
 static void mac_quadra_ring_bell( unsigned long ignored )
 {
        int     i, count = mac_asc_samplespersec / HZ;
-       __u32 flags;
+       unsigned long flags;
 
        /*
         * we neither want a sound buffer overflow nor underflow, so we need to match
index 3ad59dde485209bce858c425e4fc8a2f64b04c09..5526faabfc21433cf3e3fa679c9fcf802646c96c 100644 (file)
@@ -13,6 +13,7 @@ config MIPS
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select RTC_LIB if !MACH_LOONGSON
+       select GENERIC_ATOMIC64 if !64BIT
 
 mainmenu "Linux/MIPS Kernel Configuration"
 
@@ -1646,8 +1647,16 @@ config MIPS_MT_SMP
        select SYS_SUPPORTS_SMP
        select SMP_UP
        help
-         This is a kernel model which is also known a VSMP or lately
-         has been marketesed into SMVP.
+         This is a kernel model which is known as VSMP but has lately been
+         marketed as SMVP.
+         Virtual SMP uses the processor's VPEs to implement virtual
+         processors. In the currently available configuration of the 34K
+         processor this allows for a dual processor. Both processors will
+         share the same primary caches; each will obtain half of the TLB for
+         its own exclusive use. For a layman this model can be described as
+         similar to what Intel calls Hyperthreading.
+
+         For further information see http://www.linux-mips.org/wiki/34K#VSMP
 
 config MIPS_MT_SMTC
        bool "SMTC: Use all TCs on all VPEs for SMP"
@@ -1664,6 +1673,14 @@ config MIPS_MT_SMTC
        help
          This is a kernel model which is known a SMTC or lately has been
          marketesed into SMVP.
+         It presents the available TCs of the core as processors to Linux.
+         On currently available 34K processors this means a Linux system will
+         see up to 5 processors. The implementation of the SMTC kernel differs
+         significantly from VSMP and the two cannot efficiently coexist in the
+         same kernel binary, so the choice between VSMP and SMTC is a
+         compile-time decision.
+
+         For further information see http://www.linux-mips.org/wiki/34K#SMTC
 
 endchoice
 
index c29511b11d44fd6732b0dd153d009dbd7051555d..5340210596297fa54c8723e866aebaeb8c20269e 100644 (file)
@@ -43,7 +43,7 @@ int prom_argc;
 char **prom_argv;
 char **prom_envp;
 
-void prom_init_cmdline(void)
+void __init prom_init_cmdline(void)
 {
        int i;
 
@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
        }
 }
 
-int prom_get_ethernet_addr(char *ethernet_addr)
+int __init prom_get_ethernet_addr(char *ethernet_addr)
 {
        char *ethaddr_str;
 
@@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr)
 
        return 0;
 }
-EXPORT_SYMBOL(prom_get_ethernet_addr);
 
 void __init prom_free_prom_memory(void)
 {
index ed9bb709c9a3816a4738d0ceef91db8c8de20cd4..5fd7f7a58b7eb9b02446e8ea2aa3e2e9b1e48e8f 100644 (file)
@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
 hostprogs-y := calc_vmlinuz_load_addr
 
 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
-               $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS))
+               $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
 
 vmlinuzobjs-y += $(obj)/piggy.o
 
index 094c17e38e163ab691b71bef78710f73848c7645..47323ca452dcbde751536c58c9f613dd1d9d51c8 100644 (file)
@@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE
        def_bool y
        select SPARSEMEM_STATIC
        depends on CPU_CAVIUM_OCTEON
+
+config CAVIUM_OCTEON_HELPER
+       def_bool y
+       depends on OCTEON_ETHERNET || PCI
index c664c8cc2b42cb8970b9f57531a03e2998075566..a5b427909b5cac04d28c4da1b099342ee72df4ce 100644 (file)
@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
        return NOTIFY_OK;               /* Let default notifier send signals */
 }
 
-static int cnmips_cu2_setup(void)
+static int __init cnmips_cu2_setup(void)
 {
        return cu2_notifier(cnmips_cu2_call, 0);
 }
index 2fd66db6939e0f981c338015127c69788d960843..7f41c5be2190ddca03fc92a00e8f21bd735414f5 100644 (file)
@@ -11,4 +11,4 @@
 
 obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
 
-obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
+obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
index c63c56bfd18461b558e0cba7380d6297fc47115e..47d87da379f947c8a578f2a2b84e7da804f559a8 100644 (file)
@@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
 
+#else /* !CONFIG_64BIT */
+
+#include <asm-generic/atomic64.h>
+
 #endif /* CONFIG_64BIT */
 
 /*
index 2cb2f0c2c4f89342ae5256a79d8082f01af4d319..3532e2c5f098ae46a4a79f7455cd004d699dd4a9 100644 (file)
@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v);
 
 #define cu2_notifier(fn, pri)                                          \
 ({                                                                     \
-       static struct notifier_block fn##_nb __cpuinitdata = {          \
+       static struct notifier_block fn##_nb = {                        \
                .notifier_call = fn,                                    \
                .priority = pri                                         \
        };                                                              \
index 9b9436a4d816bfe7f26a1eb8a7e7cb677507c388..86548da650e765f79db345f4d3964a5d0eb6c0fe 100644 (file)
@@ -321,6 +321,7 @@ struct gic_intrmask_regs {
  */
 struct gic_intr_map {
        unsigned int cpunum;    /* Directed to this CPU */
+#define GIC_UNUSED             0xdead                  /* Dummy data */
        unsigned int pin;       /* Directed to this Pin */
        unsigned int polarity;  /* Polarity : +/-       */
        unsigned int trigtype;  /* Trigger  : Edge/Levl */
index b74caf65482b2e068b86d2763daa81733bc45503..ff9a8b86cb9363c1fb458546c56550fc35a7a3ab 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef __ASM_MACH_TX49XX_KMALLOC_H
 #define __ASM_MACH_TX49XX_KMALLOC_H
 
-#define ARCH_KMALLOC_MINALIGN  L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #endif /* __ASM_MACH_TX49XX_KMALLOC_H */
index cea872fc6f5c0d1ae92f00f920f7f991c8b9b806..d11aa02a956a57ca41ff890dbe16bbdd0145db53 100644 (file)
@@ -88,9 +88,6 @@
 
 #define GIC_EXT_INTR(x)                x
 
-/* Dummy data */
-#define X                      0xdead
-
 /* External Interrupts used for IPI */
 #define GIC_IPI_EXT_INTR_RESCHED_VPE0  16
 #define GIC_IPI_EXT_INTR_CALLFNC_VPE0  17
index a16beafcea91dd091f0b491ea572b5902e272a3d..e59cd1ac09c2f82eb8c91af1bdb0b6a520513d89 100644 (file)
@@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t;
     ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x)                ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+/*
+ * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
+ * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).  The
+ * discussion can be found in lkml posting
+ * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
+ * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
+ *
+ * It is unclear if the miscompilations mentioned in
+ * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
+ * until GCC 3.x has been retired before we can apply
+ * https://patchwork.linux-mips.org/patch/1541/
+ */
+
 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
 
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
index 2376f2e06e470a264eeff5115dc692c80e9f37c3..70df9c0d3c5be20e2d7b646460276a44374fc077 100644 (file)
@@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_LOAD_WATCH                (1<<TIF_LOAD_WATCH)
 
 /* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK         (0x0000ffef & ~_TIF_SECCOMP)
+#define _TIF_WORK_MASK         (0x0000ffef &                           \
+                                       ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK      (0x8000ffff & ~_TIF_SECCOMP)
 
index baa318a59c97f8c2791842df809c1218796c7492..550725b881d5edec666a5b32bbe1200164ac7fd8 100644 (file)
 #define __NR_perf_event_open           (__NR_Linux + 333)
 #define __NR_accept4                   (__NR_Linux + 334)
 #define __NR_recvmmsg                  (__NR_Linux + 335)
+#define __NR_fanotify_init             (__NR_Linux + 336)
+#define __NR_fanotify_mark             (__NR_Linux + 337)
+#define __NR_prlimit64                 (__NR_Linux + 338)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            335
+#define __NR_Linux_syscalls            338
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                335
+#define __NR_O32_Linux_syscalls                338
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_perf_event_open           (__NR_Linux + 292)
 #define __NR_accept4                   (__NR_Linux + 293)
 #define __NR_recvmmsg                  (__NR_Linux + 294)
+#define __NR_fanotify_init             (__NR_Linux + 295)
+#define __NR_fanotify_mark             (__NR_Linux + 296)
+#define __NR_prlimit64                 (__NR_Linux + 297)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            294
+#define __NR_Linux_syscalls            297
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         294
+#define __NR_64_Linux_syscalls         297
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_accept4                   (__NR_Linux + 297)
 #define __NR_recvmmsg                  (__NR_Linux + 298)
 #define __NR_getdents64                        (__NR_Linux + 299)
+#define __NR_fanotify_init             (__NR_Linux + 300)
+#define __NR_fanotify_mark             (__NR_Linux + 301)
+#define __NR_prlimit64                 (__NR_Linux + 302)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            299
+#define __NR_Linux_syscalls            302
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                299
+#define __NR_N32_Linux_syscalls                302
 
 #ifdef __KERNEL__
 
index b181f2f0ea8e71709f331c8ee32a7f6792999106..82ba9f62f49e3b2faa98abc1f6da2fe1e062a4ed 100644 (file)
@@ -7,7 +7,6 @@
 #include <asm/io.h>
 #include <asm/gic.h>
 #include <asm/gcmpregs.h>
-#include <asm/mips-boards/maltaint.h>
 #include <asm/irq.h>
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
        int             i;
 
        irq -= _irqbase;
-       pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
+       pr_debug("%s(%d) called\n", __func__, irq);
        cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpus_empty(tmp))
                return -1;
@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
        /* Setup specifics */
        for (i = 0; i < mapsize; i++) {
                cpu = intrmap[i].cpunum;
-               if (cpu == X)
+               if (cpu == GIC_UNUSED)
                        continue;
                if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
                        continue;
index 1f4e2fa64140ee8204aed74ecf82eba7bab056be..f4546e97c60db111215495f924aa567595c39581 100644 (file)
@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        struct pt_regs *regs = args->regs;
        int trap = (regs->cp0_cause & 0x7c) >> 2;
 
-       /* Userpace events, ignore. */
+       /* Userspace events, ignore. */
        if (user_mode(regs))
                return NOTIFY_DONE;
 
index 80e2ba694babcd0d70bd8266a6be941996e2a8a8..29811f043399588604da9bbc00efd9f0997aa530 100644 (file)
@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
                memset(&tz, 0, sizeof(tz));
                if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
                                             (int)&tz, 0, 0)) == 0)
-               ret.retval = tv.tv_sec;
+                       ret.retval = tv.tv_sec;
                break;
 
        case MTSP_SYSCALL_EXIT:
index c2dab140dc98fb1588259063699c7ba09b6f8ed7..6343b4a5b8350cb3a93edea5d75f3154cde48343 100644 (file)
@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
 {
        return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
 }
+
+SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
+               u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
+{
+       return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
+                                dfd, pathname);
+}
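
The compat wrapper above exists because a 64-bit mask arrives in two 32-bit registers on o32; merge_64() stitches them back together (its real definition in linux32.c also picks which register holds the high half based on endianness). The gist, as a hedged sketch:

#include <linux/types.h>

static inline u64 example_merge_64(u32 hi, u32 lo)
{
	/* simplified: assumes the first argument is the high word */
	return ((u64)hi << 32) | lo;
}
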
index 17202bbe843f91172534dd30e41e8a9542e0d762..584415eef8c92ae9168790b4ef824470c2034f73 100644 (file)
@@ -583,7 +583,10 @@ einval:    li      v0, -ENOSYS
        sys     sys_rt_tgsigqueueinfo   4
        sys     sys_perf_event_open     5
        sys     sys_accept4             4
-       sys     sys_recvmmsg            5
+       sys     sys_recvmmsg            5       /* 4335 */
+       sys     sys_fanotify_init       2
+       sys     sys_fanotify_mark       6
+       sys     sys_prlimit64           4
        .endm
 
        /* We pre-compute the number of _instruction_ bytes needed to
index a8a6c596eb0405bab886e8dfff6ffeb8097d7fc6..5573f8e4e326910b3873007c2b216fb31fe566d4 100644 (file)
@@ -416,9 +416,12 @@ sys_call_table:
        PTR     sys_pipe2
        PTR     sys_inotify_init1
        PTR     sys_preadv
-       PTR     sys_pwritev                     /* 5390 */
+       PTR     sys_pwritev                     /* 5290 */
        PTR     sys_rt_tgsigqueueinfo
        PTR     sys_perf_event_open
        PTR     sys_accept4
-       PTR     sys_recvmmsg
+       PTR     sys_recvmmsg
+       PTR     sys_fanotify_init               /* 5295 */
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
        .size   sys_call_table,.-sys_call_table
index a3d66137731ac24386972c82426a4679d9ff9be3..1e38ec97672ed82d30b238ad7bbd6adbb2c10b9f 100644 (file)
@@ -419,5 +419,8 @@ EXPORT(sysn32_call_table)
        PTR     sys_perf_event_open
        PTR     sys_accept4
        PTR     compat_sys_recvmmsg
-       PTR     sys_getdents
+       PTR     sys_getdents64
+       PTR     sys_fanotify_init               /* 6300 */
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
        .size   sysn32_call_table,.-sysn32_call_table
index 813689ef23847c6a2db230ebd6dcae60e0ff79f6..171979fc98e59a5169049d44001ad75383268684 100644 (file)
@@ -538,5 +538,8 @@ sys_call_table:
        PTR     compat_sys_rt_tgsigqueueinfo
        PTR     sys_perf_event_open
        PTR     sys_accept4
-       PTR     compat_sys_recvmmsg
+       PTR     compat_sys_recvmmsg             /* 4335 */
+       PTR     sys_fanotify_init
+       PTR     sys_32_fanotify_mark
+       PTR     sys_prlimit64
        .size   sys_call_table,.-sys_call_table
index 7ba890860d98cb3916c84f369e3fef0200b07f2b..469d4019f795bd072b0aa4ba109d075b5377f55c 100644 (file)
@@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
+       gfp_t dma_flag;
+
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ISA
        if (dev == NULL)
-               gfp |= __GFP_DMA;
-       else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
-               gfp |= __GFP_DMA;
+               dma_flag = __GFP_DMA;
        else
 #endif
-#ifdef CONFIG_ZONE_DMA32
+#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
             if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-               gfp |= __GFP_DMA32;
+                       dma_flag = __GFP_DMA;
+       else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+                       dma_flag = __GFP_DMA32;
+       else
+#endif
+#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+               dma_flag = __GFP_DMA32;
+       else
+#endif
+#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+               dma_flag = __GFP_DMA;
        else
 #endif
-               ;
+               dma_flag = 0;
 
        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;
 
-       return gfp;
+       return gfp | dma_flag;
 }
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
index 1ef75cd80a0d819827f5057549fdd2622442a752..274af3be1442b42fa41d3cb960b598ddcbf5b8c2 100644 (file)
@@ -30,7 +30,7 @@
 #define tc_lsize       32
 
 extern unsigned long icache_way_size, dcache_way_size;
-unsigned long tcache_size;
+static unsigned long tcache_size;
 
 #include <asm/r4kcache.h>
 
index 15949b0be811f9718af9e2896d4bd9e947c84897..b79b24afe3a2fc67ab6687a082e44d45bf219242 100644 (file)
@@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
  */
 
 #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
+#define X GIC_UNUSED
+
 static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { X, X,            X,           X,              0 },
        { X, X,            X,           X,              0 },
@@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { X, X,            X,           X,              0 },
        /* The remainder of this table is initialised by fill_ipi_map */
 };
+#undef X
 
 /*
  * GCMP needs to be detected before any SMP initialisation
index 71f7d27b0d4cccf28dba3777b0a8848de53f82c4..f31218e17d3c1437f4f8ef15a7d1855ddc6a32fb 100644 (file)
@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void)
        if (!((pcicvalue == PCIM_H_EA) ||
              (pcicvalue == PCIM_H_IA_FIX) ||
              (pcicvalue == PCIM_H_IA_RR))) {
-               pr_err(KERN_ERR "PCI init error!!!\n");
+               pr_err("PCI init error!!!\n");
                /* Not in Host Mode, return ERROR */
                return -1;
        }
index fadd8744a6bccfbf25283369608c66b16afeab04..e7a12ff304b9475c0db2e6097989c510b288a3c4 100644 (file)
  */
 #include <linux/kernel.h>
 
+#include <asm/processor.h>
 #include <asm/reboot.h>
 #include <glb.h>
 
 void pnx8550_machine_restart(char *command)
 {
-       char head[] = "************* Machine restart *************";
-       char foot[] = "*******************************************";
-
-       printk("\n\n");
-       printk("%s\n", head);
-       if (command != NULL)
-               printk("* %s\n", command);
-       printk("%s\n", foot);
-
        PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
 }
 
 void pnx8550_machine_halt(void)
 {
-       printk("*** Machine halt. (Not implemented) ***\n");
-}
-
-void pnx8550_machine_power_off(void)
-{
-       printk("*** Machine power off.  (Not implemented) ***\n");
+       while (1) {
+               if (cpu_wait)
+                       cpu_wait();
+       }
 }
index 64246c9c875c51d09e5c3861ca0e6f1096d50ac5..43cb3945fdbfffb8b355789e237a9b27df21abef 100644 (file)
@@ -44,7 +44,6 @@
 extern void __init board_setup(void);
 extern void pnx8550_machine_restart(char *);
 extern void pnx8550_machine_halt(void);
-extern void pnx8550_machine_power_off(void);
 extern struct resource ioport_resource;
 extern struct resource iomem_resource;
 extern char *prom_getcmdline(void);
@@ -100,7 +99,7 @@ void __init plat_mem_setup(void)
 
         _machine_restart = pnx8550_machine_restart;
         _machine_halt = pnx8550_machine_halt;
-        pm_power_off = pnx8550_machine_power_off;
+        pm_power_off = pnx8550_machine_halt;
 
        /* Clear the Global 2 Register, PCI Inta Output Enable Registers
           Bit 1:Enable DAC Powerdown
index 444b9f918fdf8f2d5dec64b9827d8d746bb1ade1..7c2a2f7f8dc143889b74605d2741f5707ad330e4 100644 (file)
@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
 config MN10300
        def_bool y
        select HAVE_OPROFILE
-       select HAVE_ARCH_TRACEHOOK
 
 config AM33
        def_bool y
index ff80e86b9bd2d2305d34a731685d94fe5d3f3491..ce83c74b3fd714abf68fca4a4d2beef024545c83 100644 (file)
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
 
 choice
        prompt "GDB stub port"
-       default GDBSTUB_TTYSM0
+       default GDBSTUB_ON_TTYSM0
        depends on GDBSTUB
        help
          Select the serial port used for GDB-stub.
index f49ac49e09adc079adaabdd9893258ae9b795b7f..3f50e966107641f21f346a38e50cca97d2eda24b 100644 (file)
@@ -229,9 +229,9 @@ int ffs(int x)
 #include <asm-generic/bitops/hweight.h>
 
 #define ext2_set_bit_atomic(lock, nr, addr) \
-       test_and_set_bit((nr) ^ 0x18, (addr))
+       test_and_set_bit((nr), (addr))
 #define ext2_clear_bit_atomic(lock, nr, addr) \
-       test_and_clear_bit((nr) ^ 0x18, (addr))
+       test_and_clear_bit((nr), (addr))
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/minix-le.h>
index 7e891fce2370028acea4a56497aafa74c443c0be..1865d72a86ff7cc6823a7be07dbbb0f3907e2518 100644 (file)
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
 
 /* These should not be considered constants from userland.  */
 #define SIGRTMIN       32
-#define SIGRTMAX       (_NSIG-1)
+#define SIGRTMAX       _NSIG
 
 /*
  * SA_FLAGS values:
index 6aea7fd76993b931f31f2dda76e72aecc1e31b4d..196a111e2e2937b134217356991c0ca2f68bda05 100644 (file)
@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 /*
@@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr,
  */
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 717db14c2cc32d8905a45e6cfae4bf2a00d38abd..d4de05ab786464cd585e7f1f0e6ed1652ad3de1c 100644 (file)
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
                old_sigset_t mask;
                if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
                        return -EFAULT;
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
        }
 
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
        if (!ret && oact) {
                if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }
 
        return ret;
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
 {
        unsigned int err = 0;
 
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
        if (is_using_fpu(current))
                fpu_kill_state(current);
 
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
        regs->d0 = sig;
        regs->d1 = (unsigned long) &frame->sc;
 
-       set_fs(USER_DS);
-
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 }
 
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->d0 = sig;
        regs->d1 = (long) &frame->info;
 
-       set_fs(USER_DS);
-
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 }
 
+static inline void stepback(struct pt_regs *regs)
+{
+       regs->pc -= 2;
+       regs->orig_d0 = -1;
+}
+
 /*
  * handle the actual delivery of a signal to userspace
  */
@@ -459,7 +464,7 @@ static int handle_signal(int sig,
                        /* fallthrough */
                case -ERESTARTNOINTR:
                        regs->d0 = regs->orig_d0;
-                       regs->pc -= 2;
+                       stepback(regs);
                }
        }
 
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        regs->d0 = regs->orig_d0;
-                       regs->pc -= 2;
+                       stepback(regs);
                        break;
 
                case -ERESTART_RESTARTBLOCK:
                        regs->d0 = __NR_restart_syscall;
-                       regs->pc -= 2;
+                       stepback(regs);
                        break;
                }
        }
index 28b9d983db0cb280c07dcd76921d695892e11c48..1557277fbc5c03962c56f39b7d1a5687bdea80bd 100644 (file)
@@ -2,13 +2,11 @@
 # Makefile for the MN10300-specific memory management code
 #
 
+cacheflush-y   := cache.o cache-mn10300.o
+cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+
+cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
+
 obj-y := \
        init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
-       misalignment.o dma-alloc.o
-
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
-obj-y  += cache.o cache-mn10300.o
-ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
-obj-y  += cache-flush-mn10300.o
-endif
-endif
+       misalignment.o dma-alloc.o $(cacheflush-y)
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c
new file mode 100644 (file)
index 0000000..f669ea4
--- /dev/null
@@ -0,0 +1,21 @@
+/* Handle the cache being disabled
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+
+/*
+ * allow userspace to flush the instruction cache
+ */
+asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
+{
+       if (end < start)
+               return -EINVAL;
+       return 0;
+}
index 1b76719ec1c37b1686a648cd07f8c5e7baaf9ce1..9261217e8d2c5741bb500b829bbd7663859b5541 100644 (file)
@@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page);
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_MN10300_CACHE_WBACK
-       unsigned long addr, size, off;
+       unsigned long addr, size, base, off;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ppte, pte;
 
+       if (end > 0x80000000UL) {
+               /* addresses above 0xa0000000 do not go through the cache */
+               if (end > 0xa0000000UL) {
+                       end = 0xa0000000UL;
+                       if (start >= end)
+                               return;
+               }
+
+               /* kernel addresses between 0x80000000 and 0x9fffffff do not
+                * require page tables, so we just map such addresses directly */
+               base = (start >= 0x80000000UL) ? start : 0x80000000UL;
+               mn10300_dcache_flush_range(base, end);
+               if (base == start)
+                       goto invalidate;
+               end = base;
+       }
+
        for (; start < end; start += size) {
                /* work out how much of the page to flush */
                off = start & (PAGE_SIZE - 1);
@@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
        }
 #endif
 
+invalidate:
        mn10300_icache_inv();
 }
 EXPORT_SYMBOL(flush_icache_range);
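
The new fast path relies on the fixed MN10300 kernel address map used in the constants above: 0x80000000-0x9fffffff is the cached, direct-mapped segment and everything at or above 0xa0000000 bypasses the cache, so only addresses below 0x80000000 still need a page-table walk. A small self-contained sketch of that classification (illustrative only, derived from the hunk above, not part of the patch):

	enum icache_region {
		REGION_PAGED,		/* < 0x80000000: walk page tables per page */
		REGION_DIRECT_MAPPED,	/* 0x80000000-0x9fffffff: flush directly */
		REGION_UNCACHED,	/* >= 0xa0000000: nothing to flush */
	};

	static enum icache_region classify_icache_addr(unsigned long addr)
	{
		if (addr >= 0xa0000000UL)
			return REGION_UNCACHED;
		if (addr >= 0x80000000UL)
			return REGION_DIRECT_MAPPED;
		return REGION_PAGED;
	}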
index 159a2b81e90c630db82eb9834c2096df7eb66896..6e81bb596e5b476e598e4a7309e4aba80ba0a322 100644 (file)
@@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr,
        nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
        DEBUGP("NEW num_symtab %lu\n", nsyms);
        symhdr->sh_size = nsyms * sizeof(Elf_Sym);
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
        deregister_unwind_table(mod);
-       module_bug_cleanup(mod);
 }
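
The same simplification is applied to the powerpc, s390 and sh module code further down: as I read the series being merged here, module_bug_finalize() and module_bug_cleanup() are now invoked from the generic module loader, so the architecture hooks no longer need to call them and can simply return 0 or become empty.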
index 477c663e014043a5c08fbaf51e82853005391344..49cee9df225be8bfc6b06a429ee9243d10484439 100644 (file)
@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr,
                const Elf_Shdr *sechdrs, struct module *me)
 {
        const Elf_Shdr *sect;
-       int err;
-
-       err = module_bug_finalize(hdr, sechdrs, me);
-       if (err)
-               return err;
 
        /* Apply feature fixups */
        sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr,
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 95ad9dad298e9d4773117b0406bc4a3378d77e5e..d05ae4204bbf3d3ddcc84266476b736a6790715c 100644 (file)
 #include "ppc32.h"
 #endif
 
-/*
- * Store another value in a callchain_entry.
- */
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       unsigned int nr = entry->nr;
-
-       if (nr < PERF_MAX_STACK_DEPTH) {
-               entry->ip[nr] = ip;
-               entry->nr = nr + 1;
-       }
-}
 
 /*
  * Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
        return 0;
 }
 
-static void perf_callchain_kernel(struct pt_regs *regs,
-                                 struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        unsigned long next_ip;
@@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->nip);
+       perf_callchain_store(entry, regs->nip);
 
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return;
@@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        next_ip = regs->nip;
                        lr = regs->link;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_KERNEL);
+                       perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 
                } else {
                        if (level == 0)
@@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        ++level;
                }
 
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                if (!valid_next_sp(next_sp, sp))
                        return;
                sp = next_sp;
@@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp)
                puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        unsigned long next_ip;
@@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs,
        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, next_ip);
+       perf_callchain_store(entry, next_ip);
 
        for (;;) {
                fp = (unsigned long __user *) sp;
@@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs,
                            read_user_stack_64(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_USER);
-                       callchain_store(entry, next_ip);
+                       perf_callchain_store(entry, PERF_CONTEXT_USER);
+                       perf_callchain_store(entry, next_ip);
                        continue;
                }
 
                if (level == 0)
                        next_ip = lr;
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
@@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
        return __get_user_inatomic(*ret, ptr);
 }
 
-static inline void perf_callchain_user_64(struct pt_regs *regs,
-                                         struct perf_callchain_entry *entry)
+static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                         struct pt_regs *regs)
 {
 }
 
@@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
        return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned int sp, next_sp;
        unsigned int next_ip;
@@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, next_ip);
+       perf_callchain_store(entry, next_ip);
 
        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                fp = (unsigned int __user *) (unsigned long) sp;
@@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs,
                            read_user_stack_32(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_USER);
-                       callchain_store(entry, next_ip);
+                       perf_callchain_store(entry, PERF_CONTEXT_USER);
+                       perf_callchain_store(entry, next_ip);
                        continue;
                }
 
                if (level == 0)
                        next_ip = lr;
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
 }
 
-/*
- * Since we can't get PMU interrupts inside a PMU interrupt handler,
- * we don't need separate irq and nmi entries here.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
-
-       entry->nr = 0;
-
-       if (!user_mode(regs)) {
-               perf_callchain_kernel(regs, entry);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-
-       if (regs) {
-               if (current_is_64bit())
-                       perf_callchain_user_64(regs, entry);
-               else
-                       perf_callchain_user_32(regs, entry);
-       }
-
-       return entry;
+       if (current_is_64bit())
+               perf_callchain_user_64(entry, regs);
+       else
+               perf_callchain_user_32(entry, regs);
 }
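
Throughout this file the open-coded callchain_store() and the per-cpu perf_callchain() driver are dropped in favour of the new core helpers: the arch now only fills entries via perf_callchain_store() and provides perf_callchain_kernel()/perf_callchain_user() with the (entry, regs) argument order, while the top-of-chain PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER markers are presumably stored by the generic code, which is why those calls disappear here. The generic helper has essentially the semantics of the removed function, roughly (a sketch assuming the <linux/perf_event.h> definition of this series):

	static inline void perf_callchain_store(struct perf_callchain_entry *entry,
						u64 ip)
	{
		if (entry->nr < PERF_MAX_STACK_DEPTH)
			entry->ip[entry->nr++] = ip;
	}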
index d301a30445e09a49cec4a3d4dcf2ea01529934b3..9cb4924b6c07837a47718bb1c842eb40a96df7b8 100644 (file)
@@ -402,6 +402,9 @@ static void power_pmu_read(struct perf_event *event)
 {
        s64 val, delta, prev;
 
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
        if (!event->hw.idx)
                return;
        /*
@@ -517,7 +520,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -565,7 +568,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_enable(struct pmu *pmu)
 {
        struct perf_event *event;
        struct cpu_hw_events *cpuhw;
@@ -672,6 +675,8 @@ void hw_perf_enable(void)
                }
                local64_set(&event->hw.prev_count, val);
                event->hw.idx = idx;
+               if (event->hw.state & PERF_HES_STOPPED)
+                       val = 0;
                write_pmc(idx, val);
                perf_event_update_userpage(event);
        }
@@ -727,7 +732,7 @@ static int collect_events(struct perf_event *group, int max_count,
  * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
  */
-static int power_pmu_enable(struct perf_event *event)
+static int power_pmu_add(struct perf_event *event, int ef_flags)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -735,7 +740,7 @@ static int power_pmu_enable(struct perf_event *event)
        int ret = -EAGAIN;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        /*
         * Add the event to the list (if there is room)
@@ -749,6 +754,9 @@ static int power_pmu_enable(struct perf_event *event)
        cpuhw->events[n0] = event->hw.config;
        cpuhw->flags[n0] = event->hw.event_base;
 
+       if (!(ef_flags & PERF_EF_START))
+               event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
@@ -769,7 +777,7 @@ nocheck:
 
        ret = 0;
  out:
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
        return ret;
 }
@@ -777,14 +785,14 @@ nocheck:
 /*
  * Remove a event from the PMU.
  */
-static void power_pmu_disable(struct perf_event *event)
+static void power_pmu_del(struct perf_event *event, int ef_flags)
 {
        struct cpu_hw_events *cpuhw;
        long i;
        unsigned long flags;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        power_pmu_read(event);
 
@@ -821,34 +829,60 @@ static void power_pmu_disable(struct perf_event *event)
                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
        }
 
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
 /*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
+ * POWER-PMU does not support disabling individual counters, hence
+ * program their cycle counter to their max value and ignore the interrupts.
  */
-static void power_pmu_unthrottle(struct perf_event *event)
+
+static void power_pmu_start(struct perf_event *event, int ef_flags)
+{
+       unsigned long flags;
+       s64 left;
+
+       if (!event->hw.idx || !event->hw.sample_period)
+               return;
+
+       if (!(event->hw.state & PERF_HES_STOPPED))
+               return;
+
+       if (ef_flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+
+       event->hw.state = 0;
+       left = local64_read(&event->hw.period_left);
+       write_pmc(event->hw.idx, left);
+
+       perf_event_update_userpage(event);
+       perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+}
+
+static void power_pmu_stop(struct perf_event *event, int ef_flags)
 {
-       s64 val, left;
        unsigned long flags;
 
        if (!event->hw.idx || !event->hw.sample_period)
                return;
+
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
+
        power_pmu_read(event);
-       left = event->hw.sample_period;
-       event->hw.last_period = left;
-       val = 0;
-       if (left < 0x80000000L)
-               val = 0x80000000L - left;
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
+       event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       write_pmc(event->hw.idx, 0);
+
        perf_event_update_userpage(event);
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
@@ -857,10 +891,11 @@ static void power_pmu_unthrottle(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-void power_pmu_start_txn(const struct pmu *pmu)
+void power_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+       perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
        cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -870,11 +905,12 @@ void power_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-void power_pmu_cancel_txn(const struct pmu *pmu)
+void power_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
 }
 
 /*
@@ -882,7 +918,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-int power_pmu_commit_txn(const struct pmu *pmu)
+int power_pmu_commit_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        long i, n;
@@ -901,19 +937,10 @@ int power_pmu_commit_txn(const struct pmu *pmu)
                cpuhw->event[i]->hw.config = cpuhw->events[i];
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
        return 0;
 }
 
-struct pmu power_pmu = {
-       .enable         = power_pmu_enable,
-       .disable        = power_pmu_disable,
-       .read           = power_pmu_read,
-       .unthrottle     = power_pmu_unthrottle,
-       .start_txn      = power_pmu_start_txn,
-       .cancel_txn     = power_pmu_cancel_txn,
-       .commit_txn     = power_pmu_commit_txn,
-};
-
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
@@ -1014,7 +1041,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
        return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int power_pmu_event_init(struct perf_event *event)
 {
        u64 ev;
        unsigned long flags;
@@ -1026,25 +1053,27 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        struct cpu_hw_events *cpuhw;
 
        if (!ppmu)
-               return ERR_PTR(-ENXIO);
+               return -ENOENT;
+
        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-                       return ERR_PTR(-EOPNOTSUPP);
+                       return -EOPNOTSUPP;
                ev = ppmu->generic_events[ev];
                break;
        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
-                       return ERR_PTR(err);
+                       return err;
                break;
        case PERF_TYPE_RAW:
                ev = event->attr.config;
                break;
        default:
-               return ERR_PTR(-EINVAL);
+               return -ENOENT;
        }
+
        event->hw.config_base = ev;
        event->hw.idx = 0;
 
@@ -1081,7 +1110,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                         */
                        ev = normal_pmc_alternative(ev, flags);
                        if (!ev)
-                               return ERR_PTR(-EINVAL);
+                               return -EINVAL;
                }
        }
 
@@ -1095,19 +1124,19 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                n = collect_events(event->group_leader, ppmu->n_counter - 1,
                                   ctrs, events, cflags);
                if (n < 0)
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
        }
        events[n] = ev;
        ctrs[n] = event;
        cflags[n] = flags;
        if (check_excludes(ctrs, cflags, n, 1))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        cpuhw = &get_cpu_var(cpu_hw_events);
        err = power_check_constraints(cpuhw, events, cflags, n + 1);
        put_cpu_var(cpu_hw_events);
        if (err)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        event->hw.config = events[n];
        event->hw.event_base = cflags[n];
@@ -1132,11 +1161,23 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        }
        event->destroy = hw_perf_event_destroy;
 
-       if (err)
-               return ERR_PTR(err);
-       return &power_pmu;
+       return err;
 }
 
+struct pmu power_pmu = {
+       .pmu_enable     = power_pmu_enable,
+       .pmu_disable    = power_pmu_disable,
+       .event_init     = power_pmu_event_init,
+       .add            = power_pmu_add,
+       .del            = power_pmu_del,
+       .start          = power_pmu_start,
+       .stop           = power_pmu_stop,
+       .read           = power_pmu_read,
+       .start_txn      = power_pmu_start_txn,
+       .cancel_txn     = power_pmu_cancel_txn,
+       .commit_txn     = power_pmu_commit_txn,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -1149,6 +1190,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        s64 prev, delta, left;
        int record = 0;
 
+       if (event->hw.state & PERF_HES_STOPPED) {
+               write_pmc(event->hw.idx, 0);
+               return;
+       }
+
        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
@@ -1171,6 +1217,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                        val = 0x80000000LL - left;
        }
 
+       write_pmc(event->hw.idx, val);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
+       perf_event_update_userpage(event);
+
        /*
         * Finally record data if requested.
         */
@@ -1183,23 +1234,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                if (event->attr.sample_type & PERF_SAMPLE_ADDR)
                        perf_get_data_addr(regs, &data.addr);
 
-               if (perf_event_overflow(event, nmi, &data, regs)) {
-                       /*
-                        * Interrupts are coming too fast - throttle them
-                        * by setting the event to 0, so it will be
-                        * at least 2^30 cycles until the next interrupt
-                        * (assuming each event counts at most 2 counts
-                        * per cycle).
-                        */
-                       val = 0;
-                       left = ~0ULL >> 1;
-               }
+               if (perf_event_overflow(event, nmi, &data, regs))
+                       power_pmu_stop(event, 0);
        }
-
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
-       perf_event_update_userpage(event);
 }
 
 /*
@@ -1342,6 +1379,7 @@ int register_power_pmu(struct power_pmu *pmu)
                freeze_events_kernel = MMCR0_FCHV;
 #endif /* CONFIG_PPC64 */
 
+       perf_pmu_register(&power_pmu);
        perf_cpu_notifier(power_pmu_notifier);
 
        return 0;
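
This file is converted from the old hw_perf_enable()/hw_perf_disable() plus pmu->enable/disable/unthrottle interface to the struct pmu model: pmu-wide pmu_enable/pmu_disable, per-event add/del/start/stop with PERF_EF_*/PERF_HES_* flags, and explicit registration via perf_pmu_register(). Throttling no longer programs a huge period; on overflow record_and_restart() now just calls power_pmu_stop(event, 0). A sketch of how the core drives the new callbacks, as I understand the interface from the flags used above (assumption, not text from this patch):

	/*
	 *   pmu->add(event, PERF_EF_START)    schedule a counter; start it at
	 *                                     once if PERF_EF_START is set,
	 *                                     otherwise leave it
	 *                                     PERF_HES_STOPPED | PERF_HES_UPTODATE
	 *   pmu->stop(event, PERF_EF_UPDATE)  stop counting and fold the hardware
	 *                                     count into the event
	 *   pmu->start(event, PERF_EF_RELOAD) reprogram from period_left and
	 *                                     resume counting
	 *   pmu->del(event, 0)                unschedule the counter
	 *
	 * add/del are bracketed by perf_pmu_disable()/perf_pmu_enable() so the
	 * whole PMU is quiescent while per-counter state changes.
	 */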
index 1ba45471ae436617e1ecbf3654a5064ef15d1af7..7ecca59ddf77fe20bd46b470d9392cdd16fd5ba9 100644 (file)
@@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event)
 {
        s64 val, delta, prev;
 
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
@@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -216,7 +219,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -262,8 +265,8 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-/* perf must be disabled, context locked on entry */
-static int fsl_emb_pmu_enable(struct perf_event *event)
+/* context locked on entry */
+static int fsl_emb_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuhw;
        int ret = -EAGAIN;
@@ -271,6 +274,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        u64 val;
        int i;
 
+       perf_pmu_disable(event->pmu);
        cpuhw = &get_cpu_var(cpu_hw_events);
 
        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -301,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
                        val = 0x80000000L - left;
        }
        local64_set(&event->hw.prev_count, val);
+
+       if (!(flags & PERF_EF_START)) {
+               event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+               val = 0;
+       }
+
        write_pmc(i, val);
        perf_event_update_userpage(event);
 
@@ -310,15 +320,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        ret = 0;
  out:
        put_cpu_var(cpu_hw_events);
+       perf_pmu_enable(event->pmu);
        return ret;
 }
 
-/* perf must be disabled, context locked on entry */
-static void fsl_emb_pmu_disable(struct perf_event *event)
+/* context locked on entry */
+static void fsl_emb_pmu_del(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuhw;
        int i = event->hw.idx;
 
+       perf_pmu_disable(event->pmu);
        if (i < 0)
                goto out;
 
@@ -346,44 +358,57 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
        cpuhw->n_events--;
 
  out:
+       perf_pmu_enable(event->pmu);
        put_cpu_var(cpu_hw_events);
 }
 
-/*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
- *
- * Context is locked on entry, but perf is not disabled.
- */
-static void fsl_emb_pmu_unthrottle(struct perf_event *event)
+static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
 {
-       s64 val, left;
        unsigned long flags;
+       s64 left;
 
        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;
+
+       if (!(event->hw.state & PERF_HES_STOPPED))
+               return;
+
+       if (ef_flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
        local_irq_save(flags);
-       perf_disable();
-       fsl_emb_pmu_read(event);
-       left = event->hw.sample_period;
-       event->hw.last_period = left;
-       val = 0;
-       if (left < 0x80000000L)
-               val = 0x80000000L - left;
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
+       perf_pmu_disable(event->pmu);
+
+       event->hw.state = 0;
+       left = local64_read(&event->hw.period_left);
+       write_pmc(event->hw.idx, left);
+
        perf_event_update_userpage(event);
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
-static struct pmu fsl_emb_pmu = {
-       .enable         = fsl_emb_pmu_enable,
-       .disable        = fsl_emb_pmu_disable,
-       .read           = fsl_emb_pmu_read,
-       .unthrottle     = fsl_emb_pmu_unthrottle,
-};
+static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
+{
+       unsigned long flags;
+
+       if (event->hw.idx < 0 || !event->hw.sample_period)
+               return;
+
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
+       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+
+       fsl_emb_pmu_read(event);
+       event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       write_pmc(event->hw.idx, 0);
+
+       perf_event_update_userpage(event);
+       perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+}
 
 /*
  * Release the PMU if this is the last perf_event.
@@ -428,7 +453,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
        return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int fsl_emb_pmu_event_init(struct perf_event *event)
 {
        u64 ev;
        struct perf_event *events[MAX_HWEVENTS];
@@ -441,14 +466,14 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-                       return ERR_PTR(-EOPNOTSUPP);
+                       return -EOPNOTSUPP;
                ev = ppmu->generic_events[ev];
                break;
 
        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
-                       return ERR_PTR(err);
+                       return err;
                break;
 
        case PERF_TYPE_RAW:
@@ -456,12 +481,12 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                break;
 
        default:
-               return ERR_PTR(-EINVAL);
+               return -ENOENT;
        }
 
        event->hw.config = ppmu->xlate_event(ev);
        if (!(event->hw.config & FSL_EMB_EVENT_VALID))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        /*
         * If this is in a group, check if it can go on with all the
@@ -473,7 +498,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                n = collect_events(event->group_leader,
                                   ppmu->n_counter - 1, events);
                if (n < 0)
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
        }
 
        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -484,7 +509,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                }
 
                if (num_restricted >= ppmu->n_restricted)
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
        }
 
        event->hw.idx = -1;
@@ -497,7 +522,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        if (event->attr.exclude_kernel)
                event->hw.config_base |= PMLCA_FCS;
        if (event->attr.exclude_idle)
-               return ERR_PTR(-ENOTSUPP);
+               return -ENOTSUPP;
 
        event->hw.last_period = event->hw.sample_period;
        local64_set(&event->hw.period_left, event->hw.last_period);
@@ -523,11 +548,20 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        }
        event->destroy = hw_perf_event_destroy;
 
-       if (err)
-               return ERR_PTR(err);
-       return &fsl_emb_pmu;
+       return err;
 }
 
+static struct pmu fsl_emb_pmu = {
+       .pmu_enable     = fsl_emb_pmu_enable,
+       .pmu_disable    = fsl_emb_pmu_disable,
+       .event_init     = fsl_emb_pmu_event_init,
+       .add            = fsl_emb_pmu_add,
+       .del            = fsl_emb_pmu_del,
+       .start          = fsl_emb_pmu_start,
+       .stop           = fsl_emb_pmu_stop,
+       .read           = fsl_emb_pmu_read,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -540,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        s64 prev, delta, left;
        int record = 0;
 
+       if (event->hw.state & PERF_HES_STOPPED) {
+               write_pmc(event->hw.idx, 0);
+               return;
+       }
+
        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
@@ -562,6 +601,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                        val = 0x80000000LL - left;
        }
 
+       write_pmc(event->hw.idx, val);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
+       perf_event_update_userpage(event);
+
        /*
         * Finally record data if requested.
         */
@@ -571,23 +615,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                perf_sample_data_init(&data, 0);
                data.period = event->hw.last_period;
 
-               if (perf_event_overflow(event, nmi, &data, regs)) {
-                       /*
-                        * Interrupts are coming too fast - throttle them
-                        * by setting the event to 0, so it will be
-                        * at least 2^30 cycles until the next interrupt
-                        * (assuming each event counts at most 2 counts
-                        * per cycle).
-                        */
-                       val = 0;
-                       left = ~0ULL >> 1;
-               }
+               if (perf_event_overflow(event, nmi, &data, regs))
+                       fsl_emb_pmu_stop(event, 0);
        }
-
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
-       perf_event_update_userpage(event);
 }
 
 static void perf_event_interrupt(struct pt_regs *regs)
@@ -651,5 +681,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
        pr_info("%s performance monitor hardware support registered\n",
                pmu->name);
 
+       perf_pmu_register(&fsl_emb_pmu);
+
        return 0;
 }
index 5b243bd3eb3b699ee6a0712340c9df51db2ed948..3dc2a8d262b8731aa4995b6c4b20621f06742fef 100644 (file)
@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
        int id_match = 0;
 
        if (dev == NULL || id == NULL)
-               return NULL;
+               return clk;
 
        mutex_lock(&clocks_mutex);
        list_for_each_entry(p, &clocks, node) {
index 45c0cb9b67e6774958c621b6e3c8055d43e46163..18c10482019811fd4fa25cbf1b0972ef87d4d0c7 100644 (file)
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
        if (bus_range == NULL || len < 2 * sizeof(int)) {
                printk(KERN_WARNING EFIKA_PLATFORM_NAME
                       ": Can't get bus-range for %s\n", pcictrl->full_name);
-               return;
+               goto out_put;
        }
 
        if (bus_range[1] == bus_range[0])
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
        printk(" controlled by %s\n", pcictrl->full_name);
        printk("\n");
 
-       hose = pcibios_alloc_controller(of_node_get(pcictrl));
+       hose = pcibios_alloc_controller(pcictrl);
        if (!hose) {
                printk(KERN_WARNING EFIKA_PLATFORM_NAME
                       ": Can't allocate PCI controller structure for %s\n",
                       pcictrl->full_name);
-               return;
+               goto out_put;
        }
 
        hose->first_busno = bus_range[0];
@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void)
        hose->ops = &rtas_pci_ops;
 
        pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+       return;
+out_put:
+       of_node_put(pcictrl);
 }
 
 #else
index 6e905314ad5d66035daf38a514adbfaa2aa60dfa..41f3a7eda1def670c1c12864de488788fc27ff98 100644 (file)
@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
        clrbits32(&simple_gpio->simple_dvo, sync | out);
        clrbits8(&wkup_gpio->wkup_dvo, reset);
 
-       /* wait at lease 1 us */
-       udelay(2);
+       /* wait for 1 us */
+       udelay(1);
 
        /* Deassert reset */
        setbits8(&wkup_gpio->wkup_dvo, reset);
 
+       /* wait at least 200ns */
+       /* 7 ~= (200ns * timebase) / ns2sec */
+       __delay(7);
+
        /* Restore pin-muxing */
        out_be32(&simple_gpio->port_config, mux);
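
The "7 ~= (200ns * timebase) / ns2sec" comment compresses a small calculation. Spelled out, and assuming the timebase runs at roughly 33 MHz (XLB clock / 4 on typical MPC5200 boards; the exact frequency is board dependent):

	/* Worked version of the comment above; the 33 MHz timebase is an
	 * assumption. Standalone user-space sketch, not kernel code. */
	#include <stdio.h>

	int main(void)
	{
		const double timebase_hz = 33e6;          /* assumed */
		const double min_delay_s = 200e-9;        /* 200 ns from the comment */
		double ticks = min_delay_s * timebase_hz; /* = 6.6 */

		printf("%.1f timebase ticks needed -> __delay(7)\n", ticks);
		return 0;
	}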
 
index 22cfd634c35531b7f8a1d4057f92b4af72e20d1a..f7167ee4604cf7033e30eb3845aa8f9ba9fd538b 100644 (file)
@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
        vfree(me->arch.syminfo);
        me->arch.syminfo = NULL;
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 43adddfe4c04b6d2eee9acfa8cad99a7788dfcc4..ae0be697a89e4b220f527a2bdb34f4da7ec7d318 100644 (file)
@@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr,
        int ret = 0;
 
        ret |= module_dwarf_finalize(hdr, sechdrs, me);
-       ret |= module_bug_finalize(hdr, sechdrs, me);
 
        return ret;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
        module_dwarf_cleanup(mod);
 }
index a9dd3abde28e3f45bbd7d7654e8717c13aed8f34..d5ca1ef50fa9694a1942a8c304bd2c5aa69d9388 100644 (file)
 #include <asm/unwinder.h>
 #include <asm/ptrace.h>
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 static void callchain_warning(void *data, char *msg)
 {
@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
        struct perf_callchain_entry *entry = data;
 
        if (reliable)
-               callchain_store(entry, addr);
+               perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops callchain_ops = {
@@ -49,47 +44,10 @@ static const struct stacktrace_ops callchain_ops = {
        .address        = callchain_address,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->pc);
+       perf_callchain_store(entry, regs->pc);
 
        unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       /*
-        * Only the kernel side is implemented for now.
-        */
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-}
-
-/*
- * No need for separate IRQ and NMI entries.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-       entry->nr = 0;
-
-       perf_do_callchain(regs, entry);
-
-       return entry;
-}
index 55fe89bbdfe03503d3fde88269c790c7025af7b2..5a4b33435650c8ea108668d8e7e30786a20bd335 100644 (file)
@@ -224,50 +224,80 @@ again:
        local64_add(delta, &event->count);
 }
 
-static void sh_pmu_disable(struct perf_event *event)
+static void sh_pmu_stop(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
-       clear_bit(idx, cpuc->active_mask);
-       sh_pmu->disable(hwc, idx);
+       if (!(event->hw.state & PERF_HES_STOPPED)) {
+               sh_pmu->disable(hwc, idx);
+               cpuc->events[idx] = NULL;
+               event->hw.state |= PERF_HES_STOPPED;
+       }
+
+       if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+               sh_perf_event_update(event, &event->hw, idx);
+               event->hw.state |= PERF_HES_UPTODATE;
+       }
+}
 
-       barrier();
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
 
-       sh_perf_event_update(event, &event->hw, idx);
+       if (WARN_ON_ONCE(idx == -1))
+               return;
+
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+       cpuc->events[idx] = event;
+       event->hw.state = 0;
+       sh_pmu->enable(hwc, idx);
+}
+
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       cpuc->events[idx] = NULL;
-       clear_bit(idx, cpuc->used_mask);
+       sh_pmu_stop(event, PERF_EF_UPDATE);
+       __clear_bit(event->hw.idx, cpuc->used_mask);
 
        perf_event_update_userpage(event);
 }
 
-static int sh_pmu_enable(struct perf_event *event)
+static int sh_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
+       int ret = -EAGAIN;
+
+       perf_pmu_disable(event->pmu);
 
-       if (test_and_set_bit(idx, cpuc->used_mask)) {
+       if (__test_and_set_bit(idx, cpuc->used_mask)) {
                idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
                if (idx == sh_pmu->num_events)
-                       return -EAGAIN;
+                       goto out;
 
-               set_bit(idx, cpuc->used_mask);
+               __set_bit(idx, cpuc->used_mask);
                hwc->idx = idx;
        }
 
        sh_pmu->disable(hwc, idx);
 
-       cpuc->events[idx] = event;
-       set_bit(idx, cpuc->active_mask);
-
-       sh_pmu->enable(hwc, idx);
+       event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (flags & PERF_EF_START)
+               sh_pmu_start(event, PERF_EF_RELOAD);
 
        perf_event_update_userpage(event);
-
-       return 0;
+       ret = 0;
+out:
+       perf_pmu_enable(event->pmu);
+       return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
@@ -275,24 +305,56 @@ static void sh_pmu_read(struct perf_event *event)
        sh_perf_event_update(event, &event->hw, event->hw.idx);
 }
 
-static const struct pmu pmu = {
-       .enable         = sh_pmu_enable,
-       .disable        = sh_pmu_disable,
-       .read           = sh_pmu_read,
-};
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int sh_pmu_event_init(struct perf_event *event)
 {
-       int err = __hw_perf_event_init(event);
+       int err;
+
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HW_CACHE:
+       case PERF_TYPE_HARDWARE:
+               err = __hw_perf_event_init(event);
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (unlikely(err)) {
                if (event->destroy)
                        event->destroy(event);
-               return ERR_PTR(err);
        }
 
-       return &pmu;
+       return err;
+}
+
+static void sh_pmu_enable(struct pmu *pmu)
+{
+       if (!sh_pmu_initialized())
+               return;
+
+       sh_pmu->enable_all();
+}
+
+static void sh_pmu_disable(struct pmu *pmu)
+{
+       if (!sh_pmu_initialized())
+               return;
+
+       sh_pmu->disable_all();
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = sh_pmu_enable,
+       .pmu_disable    = sh_pmu_disable,
+       .event_init     = sh_pmu_event_init,
+       .add            = sh_pmu_add,
+       .del            = sh_pmu_del,
+       .start          = sh_pmu_start,
+       .stop           = sh_pmu_stop,
+       .read           = sh_pmu_read,
+};
+
 static void sh_pmu_setup(int cpu)
 {
        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -317,32 +379,17 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
 
-void hw_perf_enable(void)
-{
-       if (!sh_pmu_initialized())
-               return;
-
-       sh_pmu->enable_all();
-}
-
-void hw_perf_disable(void)
-{
-       if (!sh_pmu_initialized())
-               return;
-
-       sh_pmu->disable_all();
-}
-
-int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
+int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
 {
        if (sh_pmu)
                return -EBUSY;
-       sh_pmu = pmu;
+       sh_pmu = _pmu;
 
-       pr_info("Performance Events: %s support registered\n", pmu->name);
+       pr_info("Performance Events: %s support registered\n", _pmu->name);
 
-       WARN_ON(pmu->num_events > MAX_HWEVENTS);
+       WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
+       perf_pmu_register(&pmu);
        perf_cpu_notifier(sh_pmu_notifier);
        return 0;
 }
index 491e9d6de1912d56cea2b21f51bf8cfd7980e278..9212cd42a832339bd8160e9b87080fff135169e3 100644 (file)
@@ -30,6 +30,7 @@ config SPARC
        select PERF_USE_VMALLOC
        select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
+       select HAVE_ARCH_JUMP_LABEL
 
 config SPARC32
        def_bool !64BIT
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
new file mode 100644 (file)
index 0000000..62e66d7
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _ASM_SPARC_JUMP_LABEL_H
+#define _ASM_SPARC_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#define JUMP_LABEL(key, label)                                 \
+       do {                                                    \
+               asm goto("1:\n\t"                               \
+                        "nop\n\t"                              \
+                        "nop\n\t"                              \
+                        ".pushsection __jump_table,  \"a\"\n\t"\
+                        ".word 1b, %l[" #label "], %c0\n\t"    \
+                        ".popsection \n\t"                     \
+                        : :  "i" (key) :  : label);\
+       } while (0)
+
+#endif /* __KERNEL__ */
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+       jump_label_t code;
+       jump_label_t target;
+       jump_label_t key;
+};
+
+#endif
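
The header only provides the asm goto building block; a consumer would look roughly like the sketch below (all names hypothetical, kernel context assumed; this is how I read the JUMP_LABEL(key, label) contract, not code from this patch). The nop pair at "1:" is what arch_jump_label_transform() later patches into a branch plus delay slot when the key is enabled.

	#include <asm/jump_label.h>		/* the header added above */

	static unsigned long my_trace_key;	/* hypothetical key */

	static void do_expensive_tracing(void)
	{
		/* hypothetical slow path */
	}

	static void my_traced_function(void)
	{
		JUMP_LABEL(&my_trace_key, do_trace);
		return;
	do_trace:
		do_expensive_tracing();
	}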
index 0c2dc1f24a9a74adb05299a89ee07f8ed2bc1eb9..599398fbbc7cb78fd2f8849400d9092a95ffa786 100644 (file)
@@ -119,3 +119,5 @@ obj-$(CONFIG_COMPAT)    += $(audit--y)
 
 pc--$(CONFIG_PERF_EVENTS) := perf_event.o
 obj-$(CONFIG_SPARC64)  += $(pc--y)
+
+obj-$(CONFIG_SPARC64)  += jump_label.o
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
new file mode 100644 (file)
index 0000000..ea2dafc
--- /dev/null
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+{
+       u32 val;
+       u32 *insn = (u32 *) (unsigned long) entry->code;
+
+       if (type == JUMP_LABEL_ENABLE) {
+               s32 off = (s32)entry->target - (s32)entry->code;
+
+#ifdef CONFIG_SPARC64
+               /* ba,pt %xcc, . + (off << 2) */
+               val = 0x10680000 | ((u32) off >> 2);
+#else
+               /* ba . + (off << 2) */
+               val = 0x10800000 | ((u32) off >> 2);
+#endif
+       } else {
+               val = 0x01000000;
+       }
+
+       get_online_cpus();
+       mutex_lock(&text_mutex);
+       *insn = val;
+       flushi(insn);
+       mutex_unlock(&text_mutex);
+       put_online_cpus();
+}
+
+void arch_jump_label_text_poke_early(jump_label_t addr)
+{
+       u32 *insn_p = (u32 *) (unsigned long) addr;
+
+       *insn_p = 0x01000000;
+       flushi(insn_p);
+}
+
+#endif
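
For reference, my decoding of the three instruction words used above (an annotation, not something stated in the patch):

	/*
	 *   0x01000000   nop                    (sethi 0, %g0)
	 *   0x10800000   ba    . + disp         format 2, op2=Bicc, cond=always,
	 *                                        22-bit word displacement in the
	 *                                        low bits
	 *   0x10680000   ba,pt %xcc, . + disp   format 2, op2=BPcc, cond=always,
	 *                                        cc=%xcc, predict-taken, 19-bit
	 *                                        word displacement in the low bits
	 *
	 * The displacement is counted in instruction words, hence the "off >> 2"
	 * when the branch is constructed.
	 */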
index f848aadf54dc1c2c1feb537752fdf3410efbe9d8..ee3c7dde8d9fbd5af21f5dbf555628eff1c64b9c 100644 (file)
@@ -18,6 +18,9 @@
 #include <asm/spitfire.h>
 
 #ifdef CONFIG_SPARC64
+
+#include <linux/jump_label.h>
+
 static void *module_map(unsigned long size)
 {
        struct vm_struct *area;
@@ -227,6 +230,9 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
+       /* make jump label nops */
+       jump_label_apply_nops(me);
+
        /* Cheetah's I-cache is fully coherent.  */
        if (tlb_type == spitfire) {
                unsigned long va;
index 6318e622cfb065d9da81e40b766c12dc0a6e2e7b..0d6deb55a2ae7e4189b5ab60aec81cd8df28adb6 100644 (file)
@@ -658,13 +658,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 
                enc = perf_event_get_enc(cpuc->events[i]);
                pcr &= ~mask_for_index(idx);
-               pcr |= event_encoding(enc, idx);
+               if (hwc->state & PERF_HES_STOPPED)
+                       pcr |= nop_for_index(idx);
+               else
+                       pcr |= event_encoding(enc, idx);
        }
 out:
        return pcr;
 }
 
-void hw_perf_enable(void)
+static void sparc_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 pcr;
@@ -691,7 +694,7 @@ void hw_perf_enable(void)
        pcr_ops->write(cpuc->pcr);
 }
 
-void hw_perf_disable(void)
+static void sparc_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 val;
@@ -710,19 +713,65 @@ void hw_perf_disable(void)
        pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_disable(struct perf_event *event)
+static int active_event_index(struct cpu_hw_events *cpuc,
+                             struct perf_event *event)
+{
+       int i;
+
+       for (i = 0; i < cpuc->n_events; i++) {
+               if (cpuc->event[i] == event)
+                       break;
+       }
+       BUG_ON(i == cpuc->n_events);
+       return cpuc->current_idx[i];
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int idx = active_event_index(cpuc, event);
+
+       if (flags & PERF_EF_RELOAD) {
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+               sparc_perf_event_set_period(event, &event->hw, idx);
+       }
+
+       event->hw.state = 0;
+
+       sparc_pmu_enable_event(cpuc, &event->hw, idx);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int idx = active_event_index(cpuc, event);
+
+       if (!(event->hw.state & PERF_HES_STOPPED)) {
+               sparc_pmu_disable_event(cpuc, &event->hw, idx);
+               event->hw.state |= PERF_HES_STOPPED;
+       }
+
+       if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
+               sparc_perf_event_update(event, &event->hw, idx);
+               event->hw.state |= PERF_HES_UPTODATE;
+       }
+}
+
+static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;
        int i;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event[i]) {
-                       int idx = cpuc->current_idx[i];
+                       /* Absorb the final count and turn off the
+                        * event.
+                        */
+                       sparc_pmu_stop(event, PERF_EF_UPDATE);
 
                        /* Shift remaining entries down into
                         * the existing slot.
@@ -734,13 +783,6 @@ static void sparc_pmu_disable(struct perf_event *event)
                                        cpuc->current_idx[i];
                        }
 
-                       /* Absorb the final count and turn off the
-                        * event.
-                        */
-                       sparc_pmu_disable_event(cpuc, hwc, idx);
-                       barrier();
-                       sparc_perf_event_update(event, hwc, idx);
-
                        perf_event_update_userpage(event);
 
                        cpuc->n_events--;
@@ -748,23 +790,10 @@ static void sparc_pmu_disable(struct perf_event *event)
                }
        }
 
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
-static int active_event_index(struct cpu_hw_events *cpuc,
-                             struct perf_event *event)
-{
-       int i;
-
-       for (i = 0; i < cpuc->n_events; i++) {
-               if (cpuc->event[i] == event)
-                       break;
-       }
-       BUG_ON(i == cpuc->n_events);
-       return cpuc->current_idx[i];
-}
-
 static void sparc_pmu_read(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +803,6 @@ static void sparc_pmu_read(struct perf_event *event)
        sparc_perf_event_update(event, hwc, idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_event *event)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       int idx = active_event_index(cpuc, event);
-       struct hw_perf_event *hwc = &event->hw;
-
-       sparc_pmu_enable_event(cpuc, hwc, idx);
-}
-
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
@@ -877,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts,
        if (!n_ev)
                return 0;
 
-       if (n_ev > perf_max_events)
+       if (n_ev > MAX_HWEVENTS)
                return -1;
 
        msk0 = perf_event_get_msk(events[0]);
@@ -984,23 +1004,27 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-static int sparc_pmu_enable(struct perf_event *event)
+static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n0, ret = -EAGAIN;
        unsigned long flags;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        n0 = cpuc->n_events;
-       if (n0 >= perf_max_events)
+       if (n0 >= MAX_HWEVENTS)
                goto out;
 
        cpuc->event[n0] = event;
        cpuc->events[n0] = event->hw.event_base;
        cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+       event->hw.state = PERF_HES_UPTODATE;
+       if (!(ef_flags & PERF_EF_START))
+               event->hw.state |= PERF_HES_STOPPED;
+
        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
@@ -1020,12 +1044,12 @@ nocheck:
 
        ret = 0;
 out:
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
        return ret;
 }
 
-static int __hw_perf_event_init(struct perf_event *event)
+static int sparc_pmu_event_init(struct perf_event *event)
 {
        struct perf_event_attr *attr = &event->attr;
        struct perf_event *evts[MAX_HWEVENTS];
@@ -1038,22 +1062,33 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (atomic_read(&nmi_active) < 0)
                return -ENODEV;
 
-       pmap = NULL;
-       if (attr->type == PERF_TYPE_HARDWARE) {
+       switch (attr->type) {
+       case PERF_TYPE_HARDWARE:
                if (attr->config >= sparc_pmu->max_events)
                        return -EINVAL;
                pmap = sparc_pmu->event_map(attr->config);
-       } else if (attr->type == PERF_TYPE_HW_CACHE) {
+               break;
+
+       case PERF_TYPE_HW_CACHE:
                pmap = sparc_map_cache_event(attr->config);
                if (IS_ERR(pmap))
                        return PTR_ERR(pmap);
-       } else if (attr->type != PERF_TYPE_RAW)
-               return -EOPNOTSUPP;
+               break;
+
+       case PERF_TYPE_RAW:
+               pmap = NULL;
+               break;
+
+       default:
+               return -ENOENT;
+
+       }
 
        if (pmap) {
                hwc->event_base = perf_event_encode(pmap);
        } else {
-               /* User gives us "(encoding << 16) | pic_mask" for
+               /*
+                * User gives us "(encoding << 16) | pic_mask" for
                 * PERF_TYPE_RAW events.
                 */
                hwc->event_base = attr->config;
@@ -1071,7 +1106,7 @@ static int __hw_perf_event_init(struct perf_event *event)
        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
-                                  perf_max_events - 1,
+                                  MAX_HWEVENTS - 1,
                                   evts, events, current_idx_dmy);
                if (n < 0)
                        return -EINVAL;
@@ -1107,10 +1142,11 @@ static int __hw_perf_event_init(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void sparc_pmu_start_txn(const struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+       perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
@@ -1119,11 +1155,12 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
 }
 
 /*
@@ -1131,7 +1168,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int sparc_pmu_commit_txn(const struct pmu *pmu)
+static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n;
@@ -1147,28 +1184,24 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
                return -EAGAIN;
 
        cpuc->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
        return 0;
 }
 
-static const struct pmu pmu = {
-       .enable         = sparc_pmu_enable,
-       .disable        = sparc_pmu_disable,
+static struct pmu pmu = {
+       .pmu_enable     = sparc_pmu_enable,
+       .pmu_disable    = sparc_pmu_disable,
+       .event_init     = sparc_pmu_event_init,
+       .add            = sparc_pmu_add,
+       .del            = sparc_pmu_del,
+       .start          = sparc_pmu_start,
+       .stop           = sparc_pmu_stop,
        .read           = sparc_pmu_read,
-       .unthrottle     = sparc_pmu_unthrottle,
        .start_txn      = sparc_pmu_start_txn,
        .cancel_txn     = sparc_pmu_cancel_txn,
        .commit_txn     = sparc_pmu_commit_txn,
 };
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-       int err = __hw_perf_event_init(event);
-
-       if (err)
-               return ERR_PTR(err);
-       return &pmu;
-}
-
 void perf_event_print_debug(void)
 {
        unsigned long flags;
@@ -1244,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       sparc_pmu_disable_event(cpuc, hwc, idx);
+                       sparc_pmu_stop(event, 0);
        }
 
        return NOTIFY_STOP;
@@ -1285,28 +1318,21 @@ void __init init_hw_perf_events(void)
 
        pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-       /* All sparc64 PMUs currently have 2 events.  */
-       perf_max_events = 2;
-
+       perf_pmu_register(&pmu);
        register_die_notifier(&perf_event_nmi_notifier);
 }
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
-
-static void perf_callchain_kernel(struct pt_regs *regs,
-                                 struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                          struct pt_regs *regs)
 {
        unsigned long ksp, fp;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int graph = 0;
 #endif
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->tpc);
+       stack_trace_flush();
+
+       perf_callchain_store(entry, regs->tpc);
 
        ksp = regs->u_regs[UREG_I6];
        fp = ksp + STACK_BIAS;
@@ -1330,13 +1356,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        pc = sf->callers_pc;
                        fp = (unsigned long)sf->fp + STACK_BIAS;
                }
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
                if ((pc + 8UL) == (unsigned long) &return_to_handler) {
                        int index = current->curr_ret_stack;
                        if (current->ret_stack && index >= graph) {
                                pc = current->ret_stack[index - graph].ret;
-                               callchain_store(entry, pc);
+                               perf_callchain_store(entry, pc);
                                graph++;
                        }
                }
@@ -1344,13 +1370,12 @@ static void perf_callchain_kernel(struct pt_regs *regs,
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long ufp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->tpc);
+       perf_callchain_store(entry, regs->tpc);
 
        ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
        do {
@@ -1363,17 +1388,16 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 
                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp + STACK_BIAS;
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long ufp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->tpc);
+       perf_callchain_store(entry, regs->tpc);
 
        ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
        do {
@@ -1386,34 +1410,16 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 
                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp;
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-       entry->nr = 0;
-       if (!user_mode(regs)) {
-               stack_trace_flush();
-               perf_callchain_kernel(regs, entry);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-       if (regs) {
-               flushw_user();
-               if (test_thread_flag(TIF_32BIT))
-                       perf_callchain_user_32(regs, entry);
-               else
-                       perf_callchain_user_64(regs, entry);
-       }
-       return entry;
+       flushw_user();
+       if (test_thread_flag(TIF_32BIT))
+               perf_callchain_user_32(entry, regs);
+       else
+               perf_callchain_user_64(entry, regs);
 }
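
For orientation, a minimal, hypothetical sketch of an architecture PMU driver written against the reworked interface used above. Every callback and flag name here (event_init, add, del, start, stop, read, PERF_EF_START, PERF_HES_STOPPED, PERF_HES_UPTODATE, perf_pmu_register) is taken from the hunks in this commit; the dummy_* identifiers are invented for illustration and are not part of the patch.

#include <linux/perf_event.h>

static int dummy_event_init(struct perf_event *event)
{
	/* claim only plain hardware events; -ENOENT lets other PMUs try */
	if (event->attr.type != PERF_TYPE_HARDWARE)
		return -ENOENT;
	return 0;
}

static int dummy_add(struct perf_event *event, int flags)
{
	/* a newly added event starts stopped unless PERF_EF_START is passed */
	event->hw.state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		event->hw.state |= PERF_HES_STOPPED;
	return 0;
}

static void dummy_del(struct perf_event *event, int flags)	{ }
static void dummy_start(struct perf_event *event, int flags)	{ event->hw.state = 0; }
static void dummy_stop(struct perf_event *event, int flags)	{ event->hw.state |= PERF_HES_STOPPED; }
static void dummy_read(struct perf_event *event)		{ }

static struct pmu dummy_pmu = {
	.event_init	= dummy_event_init,
	.add		= dummy_add,
	.del		= dummy_del,
	.start		= dummy_start,
	.stop		= dummy_stop,
	.read		= dummy_read,
};

/* registered once at boot, exactly as the sparc code above now does: */
/* perf_pmu_register(&dummy_pmu); */
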
index 84f296ca9e63c85da528ee9a8d403cc8362c4097..8f58bdff20d7f7dd9b77d16a6901eb9db82f056b 100644 (file)
@@ -1506,13 +1506,6 @@ handle_ill:
        }
        STD_ENDPROC(handle_ill)
 
-       .pushsection .rodata, "a"
-       .align  8
-bpt_code:
-       bpt
-       ENDPROC(bpt_code)
-       .popsection
-
 /* Various stub interrupt handlers and syscall handlers */
 
 STD_ENTRY_LOCAL(_kernel_double_fault)
index 2ab233ba32c1564f8323884017108b0d53978366..47d0c37897d5874d0bfb95d3d2b87441bd2df6a0 100644 (file)
@@ -255,18 +255,6 @@ static void uml_net_tx_timeout(struct net_device *dev)
        netif_wake_queue(dev);
 }
 
-static int uml_net_set_mac(struct net_device *dev, void *addr)
-{
-       struct uml_net_private *lp = netdev_priv(dev);
-       struct sockaddr *hwaddr = addr;
-
-       spin_lock_irq(&lp->lock);
-       eth_mac_addr(dev, hwaddr->sa_data);
-       spin_unlock_irq(&lp->lock);
-
-       return 0;
-}
-
 static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
 {
        dev->mtu = new_mtu;
@@ -373,7 +361,7 @@ static const struct net_device_ops uml_netdev_ops = {
        .ndo_start_xmit         = uml_net_start_xmit,
        .ndo_set_multicast_list = uml_net_set_multicast_list,
        .ndo_tx_timeout         = uml_net_tx_timeout,
-       .ndo_set_mac_address    = uml_net_set_mac,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = uml_net_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
 };
@@ -472,7 +460,8 @@ static void eth_configure(int n, void *init, char *mac,
            ((*transport->user->init)(&lp->user, dev) != 0))
                goto out_unregister;
 
-       eth_mac_addr(dev, device->mac);
+       /* don't use eth_mac_addr, it will not work here */
+       memcpy(dev->dev_addr, device->mac, ETH_ALEN);
        dev->mtu = transport->user->mtu;
        dev->netdev_ops = &uml_netdev_ops;
        dev->ethtool_ops = &uml_net_ethtool_ops;
index cd145eda357950b66b1ad2f05f55bd0094df6006..49b5e1eb32622abbdab4b3631207460bc80a1e14 100644 (file)
@@ -62,7 +62,7 @@ static long execve1(const char *file,
        return error;
 }
 
-long um_execve(const char *file, char __user *__user *argv, char __user *__user *env)
+long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
 {
        long err;
 
@@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user
        return err;
 }
 
-long sys_execve(const char __user *file, char __user *__user *argv,
-               char __user *__user *env)
+long sys_execve(const char __user *file, const char __user *const __user *argv,
+               const char __user *const __user *env)
 {
        long error;
        char *filename;
index 1303a105fe91dc5aabca314e4e94faf8573b9c65..5bf97db24a046283f8704e2f7e0ee91e7b4d4844 100644 (file)
@@ -1 +1 @@
-extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env);
+extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
index 5ddb246626dbb87afe7484b87c0c495c8643de8f..f958cb876ee3d3e47ddff71094e8026c0a110f5d 100644 (file)
@@ -60,8 +60,8 @@ int kernel_execve(const char *filename,
 
        fs = get_fs();
        set_fs(KERNEL_DS);
-       ret = um_execve(filename, (char __user *__user *)argv,
-                       (char __user *__user *) envp);
+       ret = um_execve(filename, (const char __user *const __user *)argv,
+                       (const char __user *const __user *) envp);
        set_fs(fs);
 
        return ret;
index cea0cd9a316fb987bfa611a1dffa06cdba1f0332..9815221976a74bbc2d70191a69c3ccda03188ca6 100644 (file)
@@ -33,6 +33,7 @@ config X86
        select HAVE_KRETPROBES
        select HAVE_OPTPROBES
        select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_C_RECORDMCOUNT
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
@@ -59,6 +60,8 @@ config X86
        select ANON_INODES
        select HAVE_ARCH_KMEMCHECK
        select HAVE_USER_RETURN_NOTIFIER
+       select HAVE_ARCH_JUMP_LABEL
+       select HAVE_TEXT_POKE_SMP
 
 config INSTRUCTION_DECODER
        def_bool (KPROBES || PERF_EVENTS)
@@ -2125,6 +2128,10 @@ config HAVE_ATOMIC_IOMAP
        def_bool y
        depends on X86_32
 
+config HAVE_TEXT_POKE_SMP
+       bool
+       select STOP_MACHINE if SMP
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
index 030f4b93e255ac00c9cd646a733eda26733a5324..5df2869c874baced33de00a78a7b693f3237ea0f 100644 (file)
@@ -58,7 +58,19 @@ static void parse_earlyprintk(void)
                if (arg[pos] == ',')
                        pos++;
 
-               if (!strncmp(arg, "ttyS", 4)) {
+               /*
+                * make sure we have
+                *      "serial,0x3f8,115200"
+                *      "serial,ttyS0,115200"
+                *      "ttyS0,115200"
+                */
+               if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
+                       port = simple_strtoull(arg + pos, &e, 16);
+                       if (port == 0 || arg + pos == e)
+                               port = DEFAULT_SERIAL_PORT;
+                       else
+                               pos = e - arg;
+               } else if (!strncmp(arg + pos, "ttyS", 4)) {
                        static const int bases[] = { 0x3f8, 0x2f8 };
                        int idx = 0;
 
index bc6abb7bc7ee3084aa8b5d87ae19244715469990..76561d20ea2f27f0edfd0eee6d043b98c6aa6e90 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
+#include <linux/jump_label.h>
 #include <asm/asm.h>
 
 /*
@@ -160,6 +161,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 #define __parainstructions_end NULL
 #endif
 
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
  * Allows the kernel to edit read-only pages.
@@ -180,4 +183,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
 
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+#define IDEAL_NOP_SIZE_5 5
+extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+extern void arch_init_ideal_nop5(void);
+#else
+static inline void arch_init_ideal_nop5(void) {}
+#endif
+
 #endif /* _ASM_X86_ALTERNATIVE_H */
index d2544f1d705d3eb9cad310ac3486f94c2358b51d..cb030374b90aeb526ad7f985527def42643bc1a9 100644 (file)
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
 
 #endif /* !CONFIG_AMD_IOMMU_STATS */
 
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+       return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+              (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H  */
index 7014e88bc7798af33f681724ff90cf09eda52af5..08616180deaf5b1e94bfe9ad0079cec7e9c619a3 100644 (file)
@@ -368,6 +368,9 @@ struct amd_iommu {
        /* capabilities of that IOMMU read from ACPI */
        u32 cap;
 
+       /* flags read from acpi table */
+       u8 acpi_flags;
+
        /*
         * Capability pointer. There could be more than one IOMMU per PCI
         * device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,15 @@ struct amd_iommu {
 
        /* default dma_ops domain for that IOMMU */
        struct dma_ops_domain *default_dom;
+
+       /*
+        * This array is required to work around a potential BIOS bug.
+        * The BIOS may fail to restore parts of the PCI configuration
+        * space when the system resumes from S3. The result is that the
+        * IOMMU no longer executes commands, which leads to system
+        * failure.
+        */
+       u32 cache_cfg[4];
 };
 
 /*
index 545776efeb164c72d523f2c78f2c501e1535344c..bafd80defa4328ed2f49e808daee7a624434345d 100644 (file)
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
        return ((1UL << (nr % BITS_PER_LONG)) &
-               (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+               (addr[nr / BITS_PER_LONG])) != 0;
 }
 
 static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
index c6fbb7b430d167c7663f99901b2c653ec6556911..3f76523589afa8b9f30abcb6841962b6344d603e 100644 (file)
 #define X86_FEATURE_XSAVEOPT   (7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN                (7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS                (7*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_DTS                (7*32+ 7) /* Digital Thermal Sensor */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  (8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
new file mode 100644 (file)
index 0000000..f52d42e
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef _ASM_X86_JUMP_LABEL_H
+#define _ASM_X86_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/nops.h>
+
+#define JUMP_LABEL_NOP_SIZE 5
+
+# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+
+# define JUMP_LABEL(key, label)                                        \
+       do {                                                    \
+               asm goto("1:"                                   \
+                       JUMP_LABEL_INITIAL_NOP                  \
+                       ".pushsection __jump_table,  \"a\" \n\t"\
+                       _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
+                       ".popsection \n\t"                      \
+                       : :  "i" (key) :  : label);             \
+       } while (0)
+
+#endif /* __KERNEL__ */
+
+#ifdef CONFIG_X86_64
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+       jump_label_t code;
+       jump_label_t target;
+       jump_label_t key;
+};
+
+#endif
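
A hedged usage sketch for the JUMP_LABEL() macro defined in this new header. The key variable, the do_trace label and the trace_hit() helper are invented placeholders (the real first users are the tracepoint and dynamic-ftrace code); the macro itself, and the behaviour of emitting a nop that can later be patched into a jump, are the ones shown above.

#include <linux/jump_label.h>

static int tracing_enabled_key;		/* placeholder key; its address is the "i" operand */

static void trace_hit(void)		/* placeholder slow path */
{
}

static void maybe_trace(void)
{
	/* compiles to a 5-byte nop; can later be patched to jump to do_trace */
	JUMP_LABEL(&tracing_enabled_key, do_trace);
	return;
do_trace:
	trace_hit();
}
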
index def500776b16a3b63d34da569021722e4d82f18a..a70cd216be5d729db1f364340f911d632819f18d 100644 (file)
 #define P4_ESCR_EMASK(v)       ((v) << P4_ESCR_EVENTMASK_SHIFT)
 #define P4_ESCR_TAG(v)         ((v) << P4_ESCR_TAG_SHIFT)
 
-/* Non HT mask */
-#define P4_ESCR_MASK                   \
-       (P4_ESCR_EVENT_MASK     |       \
-       P4_ESCR_EVENTMASK_MASK  |       \
-       P4_ESCR_TAG_MASK        |       \
-       P4_ESCR_TAG_ENABLE      |       \
-       P4_ESCR_T0_OS           |       \
-       P4_ESCR_T0_USR)
-
-/* HT mask */
-#define P4_ESCR_MASK_HT                        \
-       (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
-
 #define P4_CCCR_OVF                    0x80000000U
 #define P4_CCCR_CASCADE                        0x40000000U
 #define P4_CCCR_OVF_PMI_T0             0x04000000U
 #define P4_CCCR_THRESHOLD(v)           ((v) << P4_CCCR_THRESHOLD_SHIFT)
 #define P4_CCCR_ESEL(v)                        ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
 
-/* Non HT mask */
-#define P4_CCCR_MASK                           \
-       (P4_CCCR_OVF                    |       \
-       P4_CCCR_CASCADE                 |       \
-       P4_CCCR_OVF_PMI_T0              |       \
-       P4_CCCR_FORCE_OVF               |       \
-       P4_CCCR_EDGE                    |       \
-       P4_CCCR_THRESHOLD_MASK          |       \
-       P4_CCCR_COMPLEMENT              |       \
-       P4_CCCR_COMPARE                 |       \
-       P4_CCCR_ESCR_SELECT_MASK        |       \
-       P4_CCCR_ENABLE)
-
-/* HT mask */
-#define P4_CCCR_MASK_HT                                \
-       (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
-
 #define P4_GEN_ESCR_EMASK(class, name, bit)    \
        class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
 #define P4_ESCR_EMASK_BIT(class, name)         class##__##name
 #define P4_CONFIG_HT_SHIFT             63
 #define P4_CONFIG_HT                   (1ULL << P4_CONFIG_HT_SHIFT)
 
+/*
+ * The bits we allow to pass for RAW events
+ */
+#define P4_CONFIG_MASK_ESCR            \
+       P4_ESCR_EVENT_MASK      |       \
+       P4_ESCR_EVENTMASK_MASK  |       \
+       P4_ESCR_TAG_MASK        |       \
+       P4_ESCR_TAG_ENABLE
+
+#define P4_CONFIG_MASK_CCCR            \
+       P4_CCCR_EDGE            |       \
+       P4_CCCR_THRESHOLD_MASK  |       \
+       P4_CCCR_COMPLEMENT      |       \
+       P4_CCCR_COMPARE         |       \
+       P4_CCCR_THREAD_ANY      |       \
+       P4_CCCR_RESERVED
+
+/* some dangerous bits are reserved for kernel internals */
+#define P4_CONFIG_MASK                                   \
+       (p4_config_pack_escr(P4_CONFIG_MASK_ESCR))      | \
+       (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))
+
 static inline bool p4_is_event_cascaded(u64 config)
 {
        u32 cccr = p4_config_unpack_cccr(config);
index fedf32a8c3ecdd7a1aaed41b2884d2c9b10bf033..9d3f485e5dd0114d0e868511e89e8e423d545ff4 100644 (file)
@@ -34,7 +34,7 @@ GCOV_PROFILE_paravirt.o               := n
 obj-y                  := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y                  += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y                  += time.o ioport.o ldt.o dumpstack.o
-obj-y                  += setup.o x86_init.o i8259.o irqinit.o
+obj-y                  += setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_X86_VISWS)        += visws_quirks.o
 obj-$(CONFIG_X86_32)   += probe_roms_32.o
 obj-$(CONFIG_X86_32)   += sys_i386_32.o i386_ksyms_32.o
index fb7a5f052e2b8766d11115e3f7fc174fadf6ac2f..fb16f17e59bea7dcde91e6ad6275c7bad5bbdfb8 100644 (file)
@@ -61,7 +61,7 @@ struct cstate_entry {
                unsigned int ecx;
        } states[ACPI_PROCESSOR_MAX_POWER];
 };
-static struct cstate_entry *cpu_cstate_entry;  /* per CPU ptr */
+static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */
 
 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 
index f65ab8b014c4f42421d77200243c39c7fd252165..a36bb90aef5383d68bcf4af0b0c33749d572163a 100644 (file)
@@ -195,7 +195,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];
-static void *text_poke_early(void *addr, const void *opcode, size_t len);
+void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 /* Replace instructions with better alternatives for this CPU type.
    This runs before SMP is initialized to avoid SMP problems with
@@ -522,7 +522,7 @@ void __init alternative_instructions(void)
  * instructions. And on the local CPU you need to be protected against NMI or MCE
  * handlers seeing an inconsistent instruction while you patch.
  */
-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
+void *__init_or_module text_poke_early(void *addr, const void *opcode,
                                              size_t len)
 {
        unsigned long flags;
@@ -637,7 +637,72 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
        tpp.len = len;
        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
-       stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+       /* Use __stop_machine() because the caller already got online_cpus. */
+       __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
        return addr;
 }
 
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+
+unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+
+void __init arch_init_ideal_nop5(void)
+{
+       extern const unsigned char ftrace_test_p6nop[];
+       extern const unsigned char ftrace_test_nop5[];
+       extern const unsigned char ftrace_test_jmp[];
+       int faulted = 0;
+
+       /*
+        * There is no good nop for all x86 archs.
+        * We will default to using the P6_NOP5, but first we
+        * will test to make sure that the nop will actually
+        * work on this CPU. If it faults, we will then
+        * fall back to a less efficient 5-byte nop. If that fails
+        * we then just use a jmp as our nop. This isn't the most
+        * efficient nop, but we cannot use a multi-part nop
+        * since we would then risk being preempted in the middle
+        * of that nop, and if tracing were enabled at that point it might
+        * cause a system crash.
+        *
+        * TODO: check the cpuid to determine the best nop.
+        */
+       asm volatile (
+               "ftrace_test_jmp:"
+               "jmp ftrace_test_p6nop\n"
+               "nop\n"
+               "nop\n"
+               "nop\n"  /* 2 byte jmp + 3 bytes */
+               "ftrace_test_p6nop:"
+               P6_NOP5
+               "jmp 1f\n"
+               "ftrace_test_nop5:"
+               ".byte 0x66,0x66,0x66,0x66,0x90\n"
+               "1:"
+               ".section .fixup, \"ax\"\n"
+               "2:     movl $1, %0\n"
+               "       jmp ftrace_test_nop5\n"
+               "3:     movl $2, %0\n"
+               "       jmp 1b\n"
+               ".previous\n"
+               _ASM_EXTABLE(ftrace_test_p6nop, 2b)
+               _ASM_EXTABLE(ftrace_test_nop5, 3b)
+               : "=r"(faulted) : "0" (faulted));
+
+       switch (faulted) {
+       case 0:
+               pr_info("converting mcount calls to 0f 1f 44 00 00\n");
+               memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
+               break;
+       case 1:
+               pr_info("converting mcount calls to 66 66 66 66 90\n");
+               memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
+               break;
+       case 2:
+               pr_info("converting mcount calls to jmp . + 5\n");
+               memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
+               break;
+       }
+
+}
+#endif
index fa044e1e30a2ed081175480dccec352a7e381392..679b6450382b9c8bafa2d71e2677fcd5ae075b95 100644 (file)
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
                           size_t size,
                           int dir)
 {
+       dma_addr_t flush_addr;
        dma_addr_t i, start;
        unsigned int pages;
 
@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
            (dma_addr + size > dma_dom->aperture_size))
                return;
 
+       flush_addr = dma_addr;
        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr &= PAGE_MASK;
        start = dma_addr;
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
        dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-               iommu_flush_pages(&dma_dom->domain, dma_addr, size);
+               iommu_flush_pages(&dma_dom->domain, flush_addr, size);
                dma_dom->need_flush = false;
        }
 }
index 3cc63e2b8dd4c4acc4ee7f77c3ad432d97beb169..5a170cbbbed86aa1376eb19336a5329aacc96e91 100644 (file)
@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);
+
+       if (is_rd890_iommu(iommu->dev)) {
+               pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
+               pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
+               pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
+               pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
+       }
 }
 
 /*
@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
        struct ivhd_entry *e;
 
        /*
-        * First set the recommended feature enable bits from ACPI
-        * into the IOMMU control registers
+        * First save the recommended feature enable bits from ACPI
         */
-       h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
-               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
-       h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
-       h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
-       h->flags & IVHD_FLAG_ISOC_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
-               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
-       /*
-        * make IOMMU memory accesses cache coherent
-        */
-       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+       iommu->acpi_flags = h->flags;
 
        /*
         * Done. Now parse the device entries
@@ -1116,6 +1103,40 @@ static void init_device_table(void)
        }
 }
 
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+       iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+       /*
+        * make IOMMU memory accesses cache coherent
+        */
+       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+}
+
+static void iommu_apply_quirks(struct amd_iommu *iommu)
+{
+       if (is_rd890_iommu(iommu->dev)) {
+               pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
+               pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
+               pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
+               pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
+       }
+}
+
 /*
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
@@ -1126,6 +1147,8 @@ static void enable_iommus(void)
 
        for_each_iommu(iommu) {
                iommu_disable(iommu);
+               iommu_apply_quirks(iommu);
+               iommu_init_flags(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
index f1efebaf55105fa835ac7938c1654295fdd81562..5c5b8f3dddb58686ba4afc8b314237c0c318370b 100644 (file)
@@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,
 
        old_cfg = old_desc->chip_data;
 
-       memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+       cfg->vector = old_cfg->vector;
+       cfg->move_in_progress = old_cfg->move_in_progress;
+       cpumask_copy(cfg->domain, old_cfg->domain);
+       cpumask_copy(cfg->old_domain, old_cfg->old_domain);
 
        init_copy_irq_2_pin(old_cfg, cfg, node);
 }
 
-static void free_irq_cfg(struct irq_cfg *old_cfg)
+static void free_irq_cfg(struct irq_cfg *cfg)
 {
-       kfree(old_cfg);
+       free_cpumask_var(cfg->domain);
+       free_cpumask_var(cfg->old_domain);
+       kfree(cfg);
 }
 
 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
index 490dac63c2d21e90ab3af70c543b012f1c3f43b5..f2f9ac7da25ccfba6d5ba7ea44b63899ba672e97 100644 (file)
@@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
        }
 }
 
-static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
        u32 tfms, xlvl;
        u32 ebx;
index 3624e8a0f71bf72e4c3cd86abcedfbb1c53d9b4d..f668bb1f7d432811bc3f773d777ca2596b6f6e33 100644 (file)
@@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
                            *const __x86_cpu_dev_end[];
 
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+extern void get_cpu_cap(struct cpuinfo_x86 *c);
 
 #endif
index 994230d4dc4e545986a8c982629589905f0f224d..4f6f679f27990198640f9a1e139ea3a838b05eb8 100644 (file)
@@ -368,16 +368,22 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
                return -ENODEV;
 
        out_obj = output.pointer;
-       if (out_obj->type != ACPI_TYPE_BUFFER)
-               return -ENODEV;
+       if (out_obj->type != ACPI_TYPE_BUFFER) {
+               ret = -ENODEV;
+               goto out_free;
+       }
 
        errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
-       if (errors)
-               return -ENODEV;
+       if (errors) {
+               ret = -ENODEV;
+               goto out_free;
+       }
 
        supported = *((u32 *)(out_obj->buffer.pointer + 4));
-       if (!(supported & 0x1))
-               return -ENODEV;
+       if (!(supported & 0x1)) {
+               ret = -ENODEV;
+               goto out_free;
+       }
 
 out_free:
        kfree(output.pointer);
index 85f69cdeae1020a18e1c9097c8da276a8e50b992..b4389441efbbd8e791289aa936369e5e73917c18 100644 (file)
@@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                        c->cpuid_level = cpuid_eax(0);
+                       get_cpu_cap(c);
                }
        }
 
index 03a5b0385ad6c4402d2192b42aa14e9bfaccc80e..e2513f26ba8bbbf45eef5a4f510b9f439d55402b 100644 (file)
@@ -531,7 +531,7 @@ static int x86_pmu_hw_config(struct perf_event *event)
 /*
  * Setup the hardware configuration for a given attr_type
  */
-static int __hw_perf_event_init(struct perf_event *event)
+static int __x86_pmu_event_init(struct perf_event *event)
 {
        int err;
 
@@ -584,7 +584,7 @@ static void x86_pmu_disable_all(void)
        }
 }
 
-void hw_perf_disable(void)
+static void x86_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -619,7 +619,7 @@ static void x86_pmu_enable_all(int added)
        }
 }
 
-static const struct pmu pmu;
+static struct pmu pmu;
 
 static inline int is_x86_event(struct perf_event *event)
 {
@@ -801,10 +801,10 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
                hwc->last_tag == cpuc->tags[i];
 }
 
-static int x86_pmu_start(struct perf_event *event);
-static void x86_pmu_stop(struct perf_event *event);
+static void x86_pmu_start(struct perf_event *event, int flags);
+static void x86_pmu_stop(struct perf_event *event, int flags);
 
-void hw_perf_enable(void)
+static void x86_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
@@ -840,7 +840,14 @@ void hw_perf_enable(void)
                            match_prev_assignment(hwc, cpuc, i))
                                continue;
 
-                       x86_pmu_stop(event);
+                       /*
+                        * Ensure we don't accidentally enable a stopped
+                        * counter simply because we rescheduled.
+                        */
+                       if (hwc->state & PERF_HES_STOPPED)
+                               hwc->state |= PERF_HES_ARCH;
+
+                       x86_pmu_stop(event, PERF_EF_UPDATE);
                }
 
                for (i = 0; i < cpuc->n_events; i++) {
@@ -852,7 +859,10 @@ void hw_perf_enable(void)
                        else if (i < n_running)
                                continue;
 
-                       x86_pmu_start(event);
+                       if (hwc->state & PERF_HES_ARCH)
+                               continue;
+
+                       x86_pmu_start(event, PERF_EF_RELOAD);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
@@ -953,15 +963,12 @@ static void x86_pmu_enable_event(struct perf_event *event)
 }
 
 /*
- * activate a single event
+ * Add a single event to the PMU.
  *
  * The event is added to the group of enabled events
  * but only if it can be scheduled with existing events.
- *
- * Called with PMU disabled. If successful and return value 1,
- * then guaranteed to call perf_enable() and hw_perf_enable()
  */
-static int x86_pmu_enable(struct perf_event *event)
+static int x86_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
@@ -970,58 +977,67 @@ static int x86_pmu_enable(struct perf_event *event)
 
        hwc = &event->hw;
 
+       perf_pmu_disable(event->pmu);
        n0 = cpuc->n_events;
-       n = collect_events(cpuc, event, false);
-       if (n < 0)
-               return n;
+       ret = n = collect_events(cpuc, event, false);
+       if (ret < 0)
+               goto out;
+
+       hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (!(flags & PERF_EF_START))
+               hwc->state |= PERF_HES_ARCH;
 
        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
-        * at commit time(->commit_txn) as a whole
+        * at commit time (->commit_txn) as a whole
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
-               goto out;
+               goto done_collect;
 
        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
-               return ret;
+               goto out;
        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
-out:
+done_collect:
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;
 
-       return 0;
+       ret = 0;
+out:
+       perf_pmu_enable(event->pmu);
+       return ret;
 }
 
-static int x86_pmu_start(struct perf_event *event)
+static void x86_pmu_start(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;
 
-       if (idx == -1)
-               return -EAGAIN;
+       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+               return;
+
+       if (WARN_ON_ONCE(idx == -1))
+               return;
+
+       if (flags & PERF_EF_RELOAD) {
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+               x86_perf_event_set_period(event);
+       }
+
+       event->hw.state = 0;
 
-       x86_perf_event_set_period(event);
        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
-
-       return 0;
-}
-
-static void x86_pmu_unthrottle(struct perf_event *event)
-{
-       int ret = x86_pmu_start(event);
-       WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1078,27 +1094,29 @@ void perf_event_print_debug(void)
        local_irq_restore(flags);
 }
 
-static void x86_pmu_stop(struct perf_event *event)
+static void x86_pmu_stop(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
 
-       if (!__test_and_clear_bit(idx, cpuc->active_mask))
-               return;
-
-       x86_pmu.disable(event);
-
-       /*
-        * Drain the remaining delta count out of a event
-        * that we are disabling:
-        */
-       x86_perf_event_update(event);
+       if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+               x86_pmu.disable(event);
+               cpuc->events[hwc->idx] = NULL;
+               WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+               hwc->state |= PERF_HES_STOPPED;
+       }
 
-       cpuc->events[idx] = NULL;
+       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+               /*
+                * Drain the remaining delta count out of an event
+                * that we are disabling:
+                */
+               x86_perf_event_update(event);
+               hwc->state |= PERF_HES_UPTODATE;
+       }
 }
 
-static void x86_pmu_disable(struct perf_event *event)
+static void x86_pmu_del(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;
@@ -1111,7 +1129,7 @@ static void x86_pmu_disable(struct perf_event *event)
        if (cpuc->group_flag & PERF_EVENT_TXN)
                return;
 
-       x86_pmu_stop(event);
+       x86_pmu_stop(event, PERF_EF_UPDATE);
 
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {
@@ -1134,7 +1152,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
-       struct hw_perf_event *hwc;
        int idx, handled = 0;
        u64 val;
 
@@ -1155,7 +1172,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                }
 
                event = cpuc->events[idx];
-               hwc = &event->hw;
 
                val = x86_perf_event_update(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
@@ -1171,7 +1187,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       x86_pmu_stop(event);
+                       x86_pmu_stop(event, 0);
        }
 
        if (handled)
@@ -1388,7 +1404,6 @@ void __init init_hw_perf_events(void)
                x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
        }
        x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-       perf_max_events = x86_pmu.num_counters;
 
        if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
                WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
@@ -1424,6 +1439,7 @@ void __init init_hw_perf_events(void)
        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
        pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
+       perf_pmu_register(&pmu);
        perf_cpu_notifier(x86_pmu_notifier);
 }
 
@@ -1437,10 +1453,11 @@ static inline void x86_pmu_read(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void x86_pmu_start_txn(const struct pmu *pmu)
+static void x86_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+       perf_pmu_disable(pmu);
        cpuc->group_flag |= PERF_EVENT_TXN;
        cpuc->n_txn = 0;
 }
@@ -1450,7 +1467,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void x86_pmu_cancel_txn(const struct pmu *pmu)
+static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1460,6 +1477,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
         */
        cpuc->n_added -= cpuc->n_txn;
        cpuc->n_events -= cpuc->n_txn;
+       perf_pmu_enable(pmu);
 }
 
 /*
@@ -1467,7 +1485,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int x86_pmu_commit_txn(const struct pmu *pmu)
+static int x86_pmu_commit_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int assign[X86_PMC_IDX_MAX];
@@ -1489,22 +1507,10 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
        cpuc->group_flag &= ~PERF_EVENT_TXN;
-
+       perf_pmu_enable(pmu);
        return 0;
 }
 
-static const struct pmu pmu = {
-       .enable         = x86_pmu_enable,
-       .disable        = x86_pmu_disable,
-       .start          = x86_pmu_start,
-       .stop           = x86_pmu_stop,
-       .read           = x86_pmu_read,
-       .unthrottle     = x86_pmu_unthrottle,
-       .start_txn      = x86_pmu_start_txn,
-       .cancel_txn     = x86_pmu_cancel_txn,
-       .commit_txn     = x86_pmu_commit_txn,
-};
-
 /*
  * validate that we can schedule this event
  */
@@ -1579,12 +1585,22 @@ out:
        return ret;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+int x86_pmu_event_init(struct perf_event *event)
 {
-       const struct pmu *tmp;
+       struct pmu *tmp;
        int err;
 
-       err = __hw_perf_event_init(event);
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
+       err = __x86_pmu_event_init(event);
        if (!err) {
                /*
                 * we temporarily connect event to its pmu
@@ -1604,26 +1620,31 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        if (err) {
                if (event->destroy)
                        event->destroy(event);
-               return ERR_PTR(err);
        }
 
-       return &pmu;
+       return err;
 }
 
-/*
- * callchain support
- */
+static struct pmu pmu = {
+       .pmu_enable     = x86_pmu_enable,
+       .pmu_disable    = x86_pmu_disable,
 
-static inline
-void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
+       .event_init     = x86_pmu_event_init,
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
+       .add            = x86_pmu_add,
+       .del            = x86_pmu_del,
+       .start          = x86_pmu_start,
+       .stop           = x86_pmu_stop,
+       .read           = x86_pmu_read,
 
+       .start_txn      = x86_pmu_start_txn,
+       .cancel_txn     = x86_pmu_cancel_txn,
+       .commit_txn     = x86_pmu_commit_txn,
+};
+
+/*
+ * callchain support
+ */
 
 static void
 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
@@ -1645,7 +1666,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
        struct perf_callchain_entry *entry = data;
 
-       callchain_store(entry, addr);
+       perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops backtrace_ops = {
@@ -1656,11 +1677,15 @@ static const struct stacktrace_ops backtrace_ops = {
        .walk_stack             = print_context_stack_bp,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->ip);
+       if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest os callchain now */
+               return;
+       }
+
+       perf_callchain_store(entry, regs->ip);
 
        dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
@@ -1689,7 +1714,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
                if (fp < compat_ptr(regs->sp))
                        break;
 
-               callchain_store(entry, frame.return_address);
+               perf_callchain_store(entry, frame.return_address);
                fp = compat_ptr(frame.next_frame);
        }
        return 1;
@@ -1702,19 +1727,20 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #endif
 
-static void
-perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct stack_frame frame;
        const void __user *fp;
 
-       if (!user_mode(regs))
-               regs = task_pt_regs(current);
+       if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest os callchain now */
+               return;
+       }
 
        fp = (void __user *)regs->bp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->ip);
+       perf_callchain_store(entry, regs->ip);
 
        if (perf_callchain_user32(regs, entry))
                return;
@@ -1731,52 +1757,11 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
                if ((unsigned long)fp < regs->sp)
                        break;
 
-               callchain_store(entry, frame.return_address);
+               perf_callchain_store(entry, frame.return_address);
                fp = frame.next_frame;
        }
 }
 
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-
-       if (current->mm)
-               perf_callchain_user(regs, entry);
-}
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry;
-
-       if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-               /* TODO: We don't support guest os callchain now */
-               return NULL;
-       }
-
-       if (in_nmi())
-               entry = &__get_cpu_var(pmc_nmi_entry);
-       else
-               entry = &__get_cpu_var(pmc_irq_entry);
-
-       entry->nr = 0;
-
-       perf_do_callchain(regs, entry);
-
-       return entry;
-}
-
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
        unsigned long ip;
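
To make the event-state protocol used by the sparc and x86 hunks above explicit, here is a compact, hypothetical pair of callbacks; the example_* names are invented, while the flags and state bits (PERF_EF_UPDATE, PERF_EF_RELOAD, PERF_HES_STOPPED, PERF_HES_UPTODATE) are the ones this commit uses: ->stop(PERF_EF_UPDATE) drains the hardware counter at most once, and ->start(PERF_EF_RELOAD) only reprograms the period when the saved count is known to be up to date.

#include <linux/kernel.h>
#include <linux/perf_event.h>

static void example_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* disable the counter in hardware here */
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/* fold the remaining hardware delta into the event count here */
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void example_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD) {
		/* reloading is only valid from a drained, stopped state */
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		/* reprogram the sampling period here */
	}

	hwc->state = 0;
	/* enable the counter in hardware here */
}
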
index ee05c90012d269e66de9ffb6a5952d1434317c1e..c8f5c088cad11ae3f245e1e7374bb43c915170d6 100644 (file)
@@ -713,18 +713,18 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        struct cpu_hw_events *cpuc;
        int bit, loops;
        u64 status;
-       int handled = 0;
+       int handled;
 
        perf_sample_data_init(&data, 0);
 
        cpuc = &__get_cpu_var(cpu_hw_events);
 
        intel_pmu_disable_all();
-       intel_pmu_drain_bts_buffer();
+       handled = intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
        if (!status) {
                intel_pmu_enable_all(0);
-               return 0;
+               return handled;
        }
 
        loops = 0;
@@ -763,7 +763,7 @@ again:
                data.period = event->hw.last_period;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       x86_pmu_stop(event);
+                       x86_pmu_stop(event, 0);
        }
 
        /*
index 18018d1311cdf3fa7f550b2fd7894c278505afe6..4977f9c400e5738cb668efc937c69cde22a2772d 100644 (file)
@@ -214,7 +214,7 @@ static void intel_pmu_disable_bts(void)
        update_debugctlmsr(debugctlmsr);
 }
 
-static void intel_pmu_drain_bts_buffer(void)
+static int intel_pmu_drain_bts_buffer(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
@@ -231,16 +231,16 @@ static void intel_pmu_drain_bts_buffer(void)
        struct pt_regs regs;
 
        if (!event)
-               return;
+               return 0;
 
        if (!ds)
-               return;
+               return 0;
 
        at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
        top = (struct bts_record *)(unsigned long)ds->bts_index;
 
        if (top <= at)
-               return;
+               return 0;
 
        ds->bts_index = ds->bts_buffer_base;
 
@@ -256,7 +256,7 @@ static void intel_pmu_drain_bts_buffer(void)
        perf_prepare_sample(&header, &data, event, &regs);
 
        if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
-               return;
+               return 1;
 
        for (; at < top; at++) {
                data.ip         = at->from;
@@ -270,6 +270,7 @@ static void intel_pmu_drain_bts_buffer(void)
        /* There's new data available. */
        event->hw.interrupts++;
        event->pending_kill = POLL_IN;
+       return 1;
 }
 
 /*
@@ -491,7 +492,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
                regs.flags &= ~PERF_EFLAGS_EXACT;
 
        if (perf_event_overflow(event, 1, &data, &regs))
-               x86_pmu_stop(event);
+               x86_pmu_stop(event, 0);
 }
 
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
index b560db3305be16ff954fc416137d17b17189b5e7..81400b93e69483e18bedf1c541753e2f915910cd 100644 (file)
@@ -18,6 +18,8 @@
 struct p4_event_bind {
        unsigned int opcode;                    /* Event code and ESCR selector */
        unsigned int escr_msr[2];               /* ESCR MSR for this event */
+       unsigned int escr_emask;                /* valid ESCR EventMask bits */
+       unsigned int shared;                    /* event is shared across threads */
        char cntr[2][P4_CNTR_LIMIT];            /* counter index (offset), -1 on absence */
 };
 
@@ -66,231 +68,435 @@ static struct p4_event_bind p4_event_bind_map[] = {
        [P4_EVENT_TC_DELIVER_MODE] = {
                .opcode         = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
                .escr_msr       = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID),
+               .shared         = 1,
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_BPU_FETCH_REQUEST] = {
                .opcode         = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
                .escr_msr       = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_ITLB_REFERENCE] = {
                .opcode         = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
                .escr_msr       = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_MEMORY_CANCEL] = {
                .opcode         = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
                .escr_msr       = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF),
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_MEMORY_COMPLETE] = {
                .opcode         = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
                .escr_msr       = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_LOAD_PORT_REPLAY] = {
                .opcode         = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
                .escr_msr       = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD),
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_STORE_PORT_REPLAY] = {
                .opcode         = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
                .escr_msr       = { MSR_P4_SAAT_ESCR0 ,  MSR_P4_SAAT_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST),
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_MOB_LOAD_REPLAY] = {
                .opcode         = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
                .escr_msr       = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_PAGE_WALK_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
                .escr_msr       = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS),
+               .shared         = 1,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_BSQ_CACHE_REFERENCE] = {
                .opcode         = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
                .escr_msr       = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_IOQ_ALLOCATION] = {
                .opcode         = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER)               |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_IOQ_ACTIVE_ENTRIES] = {       /* shared ESCR */
                .opcode         = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
                .escr_msr       = { MSR_P4_FSB_ESCR1,  MSR_P4_FSB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH),
                .cntr           = { {2, -1, -1}, {3, -1, -1} },
        },
        [P4_EVENT_FSB_DATA_ACTIVITY] = {
                .opcode         = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER),
+               .shared         = 1,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_BSQ_ALLOCATION] = {           /* shared ESCR, broken CCCR1 */
                .opcode         = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
                .escr_msr       = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE)      |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE)      |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2),
                .cntr           = { {0, -1, -1}, {1, -1, -1} },
        },
        [P4_EVENT_BSQ_ACTIVE_ENTRIES] = {       /* shared ESCR */
                .opcode         = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
                .escr_msr       = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE)     |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE)  |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE)  |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE)    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE)    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2),
                .cntr           = { {2, -1, -1}, {3, -1, -1} },
        },
        [P4_EVENT_SSE_INPUT_ASSIST] = {
                .opcode         = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_PACKED_SP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_PACKED_DP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_SCALAR_SP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_SCALAR_DP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_64BIT_MMX_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_128BIT_MMX_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_X87_FP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_X87_FP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_TC_MISC] = {
                .opcode         = P4_OPCODE(P4_EVENT_TC_MISC),
                .escr_msr       = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_GLOBAL_POWER_EVENTS] = {
                .opcode         = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_TC_MS_XFER] = {
                .opcode         = P4_OPCODE(P4_EVENT_TC_MS_XFER),
                .escr_msr       = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_UOP_QUEUE_WRITES] = {
                .opcode         = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
                .escr_msr       = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD)     |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
                .escr_msr       = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL)    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_RETIRED_BRANCH_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
                .escr_msr       = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL)    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_RESOURCE_STALL] = {
                .opcode         = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
                .escr_msr       = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_WC_BUFFER] = {
                .opcode         = P4_OPCODE(P4_EVENT_WC_BUFFER),
                .escr_msr       = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS)               |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_B2B_CYCLES] = {
                .opcode         = P4_OPCODE(P4_EVENT_B2B_CYCLES),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     = 0,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_BNR] = {
                .opcode         = P4_OPCODE(P4_EVENT_BNR),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     = 0,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_SNOOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_SNOOP),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     = 0,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_RESPONSE] = {
                .opcode         = P4_OPCODE(P4_EVENT_RESPONSE),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     = 0,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_FRONT_END_EVENT] = {
                .opcode         = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_EXECUTION_EVENT] = {
                .opcode         = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_REPLAY_EVENT] = {
                .opcode         = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_INSTR_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_UOPS_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_UOP_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_UOP_TYPE),
                .escr_msr       = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS)                  |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_BRANCH_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_MISPRED_BRANCH_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .escr_emask     =
+               P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_X87_ASSIST] = {
                .opcode         = P4_OPCODE(P4_EVENT_X87_ASSIST),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU)                    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO)                    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO)                    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU)                    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_MACHINE_CLEAR] = {
                .opcode         = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_INSTR_COMPLETED] = {
                .opcode         = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
 };
@@ -428,29 +634,73 @@ static u64 p4_pmu_event_map(int hw_event)
        return config;
 }
 
+/* check cpu model specifics */
+static bool p4_event_match_cpu_model(unsigned int event_idx)
+{
+       /* INSTR_COMPLETED event only exists for models 3, 4 and 6 (Prescott) */
+       if (event_idx == P4_EVENT_INSTR_COMPLETED) {
+               if (boot_cpu_data.x86_model != 3 &&
+                       boot_cpu_data.x86_model != 4 &&
+                       boot_cpu_data.x86_model != 6)
+                       return false;
+       }
+
+       /*
+        * For information:
+        * - IQ_ESCR0, IQ_ESCR1 are available only on models 1 and 2
+        */
+
+       return true;
+}
+
 static int p4_validate_raw_event(struct perf_event *event)
 {
-       unsigned int v;
+       unsigned int v, emask;
 
-       /* user data may have out-of-bound event index */
+       /* User data may have an out-of-bounds event index */
        v = p4_config_unpack_event(event->attr.config);
-       if (v >= ARRAY_SIZE(p4_event_bind_map)) {
-               pr_warning("P4 PMU: Unknown event code: %d\n", v);
+       if (v >= ARRAY_SIZE(p4_event_bind_map))
+               return -EINVAL;
+
+       /* It may be unsupported: */
+       if (!p4_event_match_cpu_model(v))
                return -EINVAL;
+
+       /*
+        * NOTE: P4_CCCR_THREAD_ANY does not have the same meaning as
+        * in Architectural Performance Monitoring: it selects not
+        * _which_ logical cpu to count on but rather _when_, i.e. it
+        * depends on the logical cpu state -- count the event when one
+        * cpu is active, none, both or any, so we simply allow the user
+        * to pass any desired value.
+        *
+        * In turn we always set the Tx_OS/Tx_USR bits bound to this
+        * logical cpu without propagating them to the other cpu.
+        */
+
+       /*
+        * If an event is shared across the logical threads,
+        * the user needs special permissions to be able to use it
+        */
+       if (p4_event_bind_map[v].shared) {
+               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+                       return -EACCES;
        }
 
+       /* ESCR EventMask bits may be invalid */
+       emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
+       if (emask & ~p4_event_bind_map[v].escr_emask)
+               return -EINVAL;
+
        /*
-        * it may have some screwed PEBS bits
+        * it may have some invalid PEBS bits
         */
-       if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) {
-               pr_warning("P4 PMU: PEBS are not supported yet\n");
+       if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
                return -EINVAL;
-       }
+
        v = p4_config_unpack_metric(event->attr.config);
-       if (v >= ARRAY_SIZE(p4_pebs_bind_map)) {
-               pr_warning("P4 PMU: Unknown metric code: %d\n", v);
+       if (v >= ARRAY_SIZE(p4_pebs_bind_map))
                return -EINVAL;
-       }
 
        return 0;
 }
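
The stricter validation above is hit straight from the perf_event_open() path for PERF_TYPE_RAW events. A minimal user-space sketch, assuming only the standard perf_event_open(2) ABI and using a placeholder config value (a real one would be packed with the p4_config_* helpers), of how the new -EINVAL/-EACCES results surface:

        #include <linux/perf_event.h>
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <stdio.h>

        int main(void)
        {
                struct perf_event_attr attr = {
                        .type   = PERF_TYPE_RAW,
                        .size   = sizeof(attr),
                        .config = 0x0,  /* placeholder: packed ESCR/CCCR bits */
                };
                int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

                if (fd < 0)
                        perror("perf_event_open"); /* -EINVAL for bad emask, -EACCES for shared events */
                return 0;
        }
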
@@ -478,27 +728,21 @@ static int p4_hw_config(struct perf_event *event)
 
        if (event->attr.type == PERF_TYPE_RAW) {
 
+               /*
+                * Clear the bits we reserve to be managed by the kernel
+                * itself and that are never allowed from user space
+                */
+                event->attr.config &= P4_CONFIG_MASK;
+
                rc = p4_validate_raw_event(event);
                if (rc)
                        goto out;
 
                /*
-                * We don't control raw events so it's up to the caller
-                * to pass sane values (and we don't count the thread number
-                * on HT machine but allow HT-compatible specifics to be
-                * passed on)
-                *
                 * Note that for RAW events we allow user to use P4_CCCR_RESERVED
                 * bits since we keep additional info here (for cache events etc.)
-                *
-                * XXX: HT wide things should check perf_paranoid_cpu() &&
-                *      CAP_SYS_ADMIN
                 */
-               event->hw.config |= event->attr.config &
-                       (p4_config_pack_escr(P4_ESCR_MASK_HT) |
-                        p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
-
-               event->hw.config &= ~P4_CCCR_FORCE_OVF;
+               event->hw.config |= event->attr.config;
        }
 
        rc = x86_setup_perfctr(event);
@@ -660,8 +904,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                int overflow;
 
-               if (!test_bit(idx, cpuc->active_mask))
+               if (!test_bit(idx, cpuc->active_mask)) {
+                       /* catch in-flight IRQs */
+                       if (__test_and_clear_bit(idx, cpuc->running))
+                               handled++;
                        continue;
+               }
 
                event = cpuc->events[idx];
                hwc = &event->hw;
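
The cpuc->running bitmap consulted above is assumed to be maintained by the generic x86 perf code elsewhere in this series; a hedged sketch of the producer side that makes the in-flight check meaningful (helper name is illustrative only):

        /* sketch, not the actual perf_event.c hunk: mark the counter as having
         * been started so a late PMI for it is counted as handled rather than
         * reported as an unknown NMI */
        static void example_mark_counter_running(struct cpu_hw_events *cpuc, int idx)
        {
                __set_bit(idx, cpuc->running);  /* cleared by the handler above */
        }
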
index 34b4dad6f0b8e35a0fe80d01f8708c230592144d..d49079515122a6f47d2731cb1d2ac2f03efaf6e0 100644 (file)
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
        const struct cpuid_bit *cb;
 
        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+               { X86_FEATURE_DTS,              CR_EAX, 0, 0x00000006, 0 },
                { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006, 0 },
                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006, 0 },
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
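
The new row describes bit 0 of EAX in CPUID leaf 0x00000006. A hedged equivalent of what init_scattered_cpuid_features() derives from it, assuming X86_FEATURE_DTS here denotes the digital thermal sensor and with `c` being the cpuinfo_x86 argument of the surrounding function:

        unsigned int eax, ebx, ecx, edx;

        /* leaf 0x6, sub-leaf 0: thermal and power management enumeration */
        cpuid_count(0x00000006, 0, &eax, &ebx, &ecx, &edx);
        if (eax & (1 << 0))
                set_cpu_cap(c, X86_FEATURE_DTS);
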
index cd37469b54eeed3fc479d2c931d8d2ed933ea411..3afb33f14d2d2c86a3c961d87aaae531d2631ac8 100644 (file)
@@ -257,14 +257,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
        return mod_code_status;
 }
 
-
-
-
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
-
 static unsigned char *ftrace_nop_replace(void)
 {
-       return ftrace_nop;
+       return ideal_nop5;
 }
 
 static int
@@ -338,62 +333,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-       extern const unsigned char ftrace_test_p6nop[];
-       extern const unsigned char ftrace_test_nop5[];
-       extern const unsigned char ftrace_test_jmp[];
-       int faulted = 0;
-
-       /*
-        * There is no good nop for all x86 archs.
-        * We will default to using the P6_NOP5, but first we
-        * will test to make sure that the nop will actually
-        * work on this CPU. If it faults, we will then
-        * go to a lesser efficient 5 byte nop. If that fails
-        * we then just use a jmp as our nop. This isn't the most
-        * efficient nop, but we can not use a multi part nop
-        * since we would then risk being preempted in the middle
-        * of that nop, and if we enabled tracing then, it might
-        * cause a system crash.
-        *
-        * TODO: check the cpuid to determine the best nop.
-        */
-       asm volatile (
-               "ftrace_test_jmp:"
-               "jmp ftrace_test_p6nop\n"
-               "nop\n"
-               "nop\n"
-               "nop\n"  /* 2 byte jmp + 3 bytes */
-               "ftrace_test_p6nop:"
-               P6_NOP5
-               "jmp 1f\n"
-               "ftrace_test_nop5:"
-               ".byte 0x66,0x66,0x66,0x66,0x90\n"
-               "1:"
-               ".section .fixup, \"ax\"\n"
-               "2:     movl $1, %0\n"
-               "       jmp ftrace_test_nop5\n"
-               "3:     movl $2, %0\n"
-               "       jmp 1b\n"
-               ".previous\n"
-               _ASM_EXTABLE(ftrace_test_p6nop, 2b)
-               _ASM_EXTABLE(ftrace_test_nop5, 3b)
-               : "=r"(faulted) : "0" (faulted));
-
-       switch (faulted) {
-       case 0:
-               pr_info("converting mcount calls to 0f 1f 44 00 00\n");
-               memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
-               break;
-       case 1:
-               pr_info("converting mcount calls to 66 66 66 66 90\n");
-               memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
-               break;
-       case 2:
-               pr_info("converting mcount calls to jmp . + 5\n");
-               memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
-               break;
-       }
-
        /* The return code is returned via data */
        *(unsigned long *)data = 0;
 
index 410fdb3f1939ba98cfabea61071d0ffe403ef1a3..7494999141b3ae9c9ab67302481a2e6c15e969e6 100644 (file)
@@ -506,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
 {
        unsigned int irq;
 
-       irq = create_irq();
+       irq = create_irq_nr(0, -1);
        if (!irq)
                return -EINVAL;
 
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
new file mode 100644 (file)
index 0000000..961b6b3
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * jump label x86 support
+ *
+ * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
+ *
+ */
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/cpu.h>
+#include <asm/kprobes.h>
+#include <asm/alternative.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+union jump_code_union {
+       char code[JUMP_LABEL_NOP_SIZE];
+       struct {
+               char jump;
+               int offset;
+       } __attribute__((packed));
+};
+
+void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+{
+       union jump_code_union code;
+
+       if (type == JUMP_LABEL_ENABLE) {
+               code.jump = 0xe9;
+               code.offset = entry->target -
+                               (entry->code + JUMP_LABEL_NOP_SIZE);
+       } else
+               memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+       get_online_cpus();
+       mutex_lock(&text_mutex);
+       text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
+       mutex_unlock(&text_mutex);
+       put_online_cpus();
+}
+
+void arch_jump_label_text_poke_early(jump_label_t addr)
+{
+       text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+}
+
+#endif
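
The enable path above writes a 5-byte near jump whose displacement is relative to the end of the instruction. A small stand-alone sketch of that encoding, with an illustrative helper name:

        #include <string.h>

        /* encode "jmp target" at address code: opcode 0xe9 + rel32 */
        static void example_encode_jmp5(unsigned char buf[5],
                                        unsigned long code, unsigned long target)
        {
                int rel32 = (int)(target - (code + 5));

                buf[0] = 0xe9;
                memcpy(&buf[1], &rel32, sizeof(rel32));
        }
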
index 770ebfb349e93efe3367cf0c6caff93b61b8b884..1cbd54c0df99189548a3a03f40fbb75a1703475a 100644 (file)
@@ -230,9 +230,6 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
        return 0;
 }
 
-/* Dummy buffers for kallsyms_lookup */
-static char __dummy_buf[KSYM_NAME_LEN];
-
 /* Check if paddr is at an instruction boundary */
 static int __kprobes can_probe(unsigned long paddr)
 {
@@ -241,7 +238,7 @@ static int __kprobes can_probe(unsigned long paddr)
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];
 
-       if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
+       if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
                return 0;
 
        /* Decode instructions */
@@ -1129,7 +1126,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
        *(unsigned long *)addr = val;
 }
 
-void __kprobes kprobes_optinsn_template_holder(void)
+static void __used __kprobes kprobes_optinsn_template_holder(void)
 {
        asm volatile (
                        ".global optprobe_template_entry\n"
@@ -1221,7 +1218,8 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
        }
        /* Check whether the address range is reserved */
        if (ftrace_text_reserved(src, src + len - 1) ||
-           alternatives_text_reserved(src, src + len - 1))
+           alternatives_text_reserved(src, src + len - 1) ||
+           jump_label_text_reserved(src, src + len - 1))
                return -EBUSY;
 
        return len;
@@ -1269,11 +1267,9 @@ static int __kprobes can_optimize(unsigned long paddr)
        unsigned long addr, size = 0, offset = 0;
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];
-       /* Dummy buffers for lookup_symbol_attrs */
-       static char __dummy_buf[KSYM_NAME_LEN];
 
        /* Lookup symbol including addr */
-       if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf))
+       if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
                return 0;
 
        /* Check there is enough space for a relative jump. */
index e0bc186d7501f123265ae288ae071e772016e89b..8f295609173524cdf06a20c6c0ad4fbe1de12263 100644 (file)
@@ -239,11 +239,13 @@ int module_finalize(const Elf_Ehdr *hdr,
                apply_paravirt(pseg, pseg + para->sh_size);
        }
 
-       return module_bug_finalize(hdr, sechdrs, me);
+       /* make jump label nops */
+       jump_label_apply_nops(me);
+
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
        alternatives_smp_module_del(mod);
-       module_bug_cleanup(mod);
 }
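
module_bug_finalize()/module_bug_cleanup() move to generic code elsewhere in this series, so the arch hook now only needs to convert the module's jump-label sites to nops. A hedged sketch of what that conversion amounts to per site, reusing the arch helper from the new jump_label.c above (the wrapper name is illustrative; the real walk lives in generic code):

        static void example_nop_one_site(struct jump_entry *entry)
        {
                /* rewrite one disabled jump-label site to the boot-selected nop */
                arch_jump_label_text_poke_early(entry->code);
        }
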
index c3a4fbb2b996d00277d6523cb76b74e2c5944621..00e167870f714ab0799838865eec35bd13ce5601 100644 (file)
 #include <asm/numa_64.h>
 #endif
 #include <asm/mce.h>
+#include <asm/alternative.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -726,6 +727,7 @@ void __init setup_arch(char **cmdline_p)
 {
        int acpi = 0;
        int k8 = 0;
+       unsigned long flags;
 
 #ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
@@ -1071,6 +1073,10 @@ void __init setup_arch(char **cmdline_p)
        x86_init.oem.banner();
 
        mcheck_init();
+
+       local_irq_save(flags);
+       arch_init_ideal_nop5();
+       local_irq_restore(flags);
 }
 
 #ifdef CONFIG_X86_32
index 4c4508e8a2043015c1cce3eb49a46c0c435e9297..a24c6cfdccc47da8a12f16b2cd4a09caaf825b9a 100644 (file)
@@ -251,6 +251,8 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;
 
+       WARN_ON_ONCE(in_nmi());
+
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
@@ -369,6 +371,8 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;
 
+       WARN_ON_ONCE(in_nmi());
+
        /*
         * Copy kernel mappings over when needed. This can also
         * happen within a race in page table update. In the later
index b3b531a4f8e587e3560b2087e9c483b2cb1b0f93..d87dd6d042d64309fedac698dd1a5b28a8448594 100644 (file)
@@ -631,6 +631,8 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
        if (!pte)
                return false;
 
+       WARN_ON_ONCE(in_nmi());
+
        if (error_code & 2)
                kmemcheck_access(regs, address, KMEMCHECK_WRITE);
        else
index 1a5353a753fcd10e1330c03d1af43a6fe9c5a5b7..b2bb5aa3b0540e42847a7664aa529cf5d2c0fb83 100644 (file)
@@ -489,8 +489,9 @@ static void xen_hvm_setup_cpu_clockevents(void)
 __init void xen_hvm_init_time_ops(void)
 {
        /* vector callback is needed otherwise we cannot receive interrupts
-        * on cpu > 0 */
-       if (!xen_have_vector_callback && num_present_cpus() > 1)
+        * on cpu > 0 and at this point we don't know how many cpus are
+        * available */
+       if (!xen_have_vector_callback)
                return;
        if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
                printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
index 3b0cd4249671d9826b42ea10473a11486c403368..eafc94f68d79f2ceb1c0a89fce8e449c3a5ff3f6 100644 (file)
@@ -361,6 +361,18 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;
 
+       /*
+        * Don't merge file system requests and discard requests
+        */
+       if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
+               return 0;
+
+       /*
+        * Don't merge discard requests and secure discard requests
+        */
+       if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+               return 0;
+
        /*
         * not contiguous
         */
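
Both new checks implement the same rule: requests may only be merged when they agree on the discard-related flags. A compact sketch of that predicate, under the flag names used above (the helper name is illustrative):

        /* true when req and next can still be considered for merging */
        static bool example_discard_flags_match(unsigned int a, unsigned int b)
        {
                return ((a ^ b) & (REQ_DISCARD | REQ_SECURE)) == 0;
        }
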
index b811f2173f6f167c84700040fe2720bafb43156f..88681aca88c581891399e18b056d9a6df94a9e01 100644 (file)
@@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS
 
          Be aware that using this interface can confuse your Embedded
          Controller in a way that a normal reboot is not enough. You then
-         have to power of your system, and remove the laptop battery for
+         have to power off your system, and remove the laptop battery for
          some seconds.
          An Embedded Controller typically is available on laptops and reads
          sensor values like battery state and temperature.
index b76848c80be34729c56bd20d04f75f9f534cbb36..6b115f6c43133c211acd79c68a83785506a4357a 100644 (file)
@@ -382,31 +382,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
        device_remove_file(&device->dev, &dev_attr_rrtime);
 }
 
-/* Query firmware how many CPUs should be idle */
-static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
+/*
+ * Query the firmware for how many CPUs should be idle.
+ * Returns -1 on failure.
+ */
+static int acpi_pad_pur(acpi_handle handle)
 {
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package;
-       int rev, num, ret = -EINVAL;
+       int num = -1;
 
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
-               return -EINVAL;
+               return num;
 
        if (!buffer.length || !buffer.pointer)
-               return -EINVAL;
+               return num;
 
        package = buffer.pointer;
-       if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
-               goto out;
-       rev = package->package.elements[0].integer.value;
-       num = package->package.elements[1].integer.value;
-       if (rev != 1 || num < 0)
-               goto out;
-       *num_cpus = num;
-       ret = 0;
-out:
+
+       if (package->type == ACPI_TYPE_PACKAGE &&
+               package->package.count == 2 &&
+               package->package.elements[0].integer.value == 1) /* rev 1 */
+
+               num = package->package.elements[1].integer.value;
+
        kfree(buffer.pointer);
-       return ret;
+       return num;
 }
 
 /* Notify firmware how many CPUs are idle */
@@ -433,7 +434,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
        uint32_t idle_cpus;
 
        mutex_lock(&isolated_cpus_lock);
-       if (acpi_pad_pur(handle, &num_cpus)) {
+       num_cpus = acpi_pad_pur(handle);
+       if (num_cpus < 0) {
                mutex_unlock(&isolated_cpus_lock);
                return;
        }
index df85b53a674fc33105fa1434b3800db7ab26a423..7dad9160f20998112cc302d0a239c9fa27fcd429 100644 (file)
@@ -854,6 +854,7 @@ struct acpi_bit_register_info {
        ACPI_BITMASK_POWER_BUTTON_STATUS   | \
        ACPI_BITMASK_SLEEP_BUTTON_STATUS   | \
        ACPI_BITMASK_RT_CLOCK_STATUS       | \
+       ACPI_BITMASK_PCIEXP_WAKE_DISABLE   | \
        ACPI_BITMASK_WAKE_STATUS)
 
 #define ACPI_BITMASK_TIMER_ENABLE               0x0001
index 74c24d517f81768a6a00418db71c9f93be335b2e..4093522eed45692012b5da617c57187ef20c4458 100644 (file)
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
  *
  * DESCRIPTION: Reacquire the interpreter execution region from within the
  *              interpreter code. Failure to enter the interpreter region is a
- *              fatal system error. Used in  conjuction with
+ *              fatal system error. Used in  conjunction with
  *              relinquish_interpreter
  *
  ******************************************************************************/
index 22cfcfbd9fff77cd4cd5bb77be80c244461b2df5..491191e6cf692bffe1811a22674a0eb0720c3771 100644 (file)
@@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
 
                        /*
                         * 16-, 32-, and 64-bit cases must use the move macros that perform
-                        * endian conversion and/or accomodate hardware that cannot perform
+                        * endian conversion and/or accommodate hardware that cannot perform
                         * misaligned memory transfers
                         */
                case ACPI_RSC_MOVE16:
index 907e350f1c7df58370cb0e68a399b91d903ec466..fca34ccfd294a782e3f05312d99d5b5fdef5bfb4 100644 (file)
@@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG
        depends on ACPI_APEI
        help
          ERST is a way provided by APEI to save and retrieve hardware
-         error infomation to and from a persistent store. Enable this
+         error information to and from a persistent store. Enable this
          if you want to debug and test the ERST kernel support
          and firmware implementation.
index 73fd0c7487c1ae0b1311f4d7e1c048ddc809153a..4a904a4bf05f83a1b952ec2365811f8c5db931e1 100644 (file)
@@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub);
 int apei_resources_request(struct apei_resources *resources,
                           const char *desc)
 {
-       struct apei_res *res, *res_bak;
+       struct apei_res *res, *res_bak = NULL;
        struct resource *r;
+       int rc;
 
-       apei_resources_sub(resources, &apei_resources_all);
+       rc = apei_resources_sub(resources, &apei_resources_all);
+       if (rc)
+               return rc;
 
+       rc = -EINVAL;
        list_for_each_entry(res, &resources->iomem, list) {
                r = request_mem_region(res->start, res->end - res->start,
                                       desc);
@@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources,
                }
        }
 
-       apei_resources_merge(&apei_resources_all, resources);
+       rc = apei_resources_merge(&apei_resources_all, resources);
+       if (rc) {
+               pr_err(APEI_PFX "Failed to merge resources!\n");
+               goto err_unmap_ioport;
+       }
 
        return 0;
 err_unmap_ioport:
@@ -491,12 +499,13 @@ err_unmap_iomem:
                        break;
                release_mem_region(res->start, res->end - res->start);
        }
-       return -EINVAL;
+       return rc;
 }
 EXPORT_SYMBOL_GPL(apei_resources_request);
 
 void apei_resources_release(struct apei_resources *resources)
 {
+       int rc;
        struct apei_res *res;
 
        list_for_each_entry(res, &resources->iomem, list)
@@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources)
        list_for_each_entry(res, &resources->ioport, list)
                release_region(res->start, res->end - res->start);
 
-       apei_resources_sub(&apei_resources_all, resources);
+       rc = apei_resources_sub(&apei_resources_all, resources);
+       if (rc)
+               pr_err(APEI_PFX "Failed to subtract resources!\n");
 }
 EXPORT_SYMBOL_GPL(apei_resources_release);
 
index 465c885938ee89a45db10f1f39d1ceb8cba41cf9..cf29df69380b8dd32585a074c53efc7af7a65471 100644 (file)
@@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
 
 static int einj_check_table(struct acpi_table_einj *einj_tab)
 {
-       if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+       if ((einj_tab->header_length !=
+            (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
+           && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
                return -EINVAL;
        if (einj_tab->header.length < sizeof(struct acpi_table_einj))
                return -EINVAL;
index 5281ddda2777c99c40f0830c1d4e5022cc5b37aa..da1228a9a544f026bab6c549e65892d439367266 100644 (file)
@@ -2,7 +2,7 @@
  * APEI Error Record Serialization Table debug support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store. This file provide the
+ * information to and from a persistent store. This file provides the
  * debugging/testing support for ERST kernel support and firmware
  * implementation.
  *
@@ -111,11 +111,13 @@ retry:
                goto out;
        }
        if (len > erst_dbg_buf_len) {
-               kfree(erst_dbg_buf);
+               void *p;
                rc = -ENOMEM;
-               erst_dbg_buf = kmalloc(len, GFP_KERNEL);
-               if (!erst_dbg_buf)
+               p = kmalloc(len, GFP_KERNEL);
+               if (!p)
                        goto out;
+               kfree(erst_dbg_buf);
+               erst_dbg_buf = p;
                erst_dbg_buf_len = len;
                goto retry;
        }
@@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
        if (mutex_lock_interruptible(&erst_dbg_mutex))
                return -EINTR;
        if (usize > erst_dbg_buf_len) {
-               kfree(erst_dbg_buf);
+               void *p;
                rc = -ENOMEM;
-               erst_dbg_buf = kmalloc(usize, GFP_KERNEL);
-               if (!erst_dbg_buf)
+               p = kmalloc(usize, GFP_KERNEL);
+               if (!p)
                        goto out;
+               kfree(erst_dbg_buf);
+               erst_dbg_buf = p;
                erst_dbg_buf_len = usize;
        }
        rc = copy_from_user(erst_dbg_buf, ubuf, usize);
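
Both erst_dbg hunks switch to an allocate-then-swap pattern so a failed enlargement leaves the old buffer intact. A minimal sketch of the pattern with illustrative names:

        static int example_grow_buffer(void **bufp, size_t *lenp, size_t want)
        {
                void *p = kmalloc(want, GFP_KERNEL);

                if (!p)
                        return -ENOMEM;         /* old buffer stays valid */
                kfree(*bufp);
                *bufp = p;
                *lenp = want;
                return 0;
        }
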
index 18645f4e83cdd2f22d526276b320dac631be8940..1211c03149e8c7c258fee89dd1109e1e6b901c26 100644 (file)
@@ -2,7 +2,7 @@
  * APEI Error Record Serialization Table support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store.
+ * information to and from a persistent store.
  *
  * For more information about ERST, please refer to ACPI Specification
  * version 4.0, section 17.4.
@@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
 {
        int rc;
        u64 offset;
+       void *src, *dst;
+
+       /* ioremap does not work in interrupt context */
+       if (in_interrupt()) {
+               pr_warning(ERST_PFX
+                          "MOVE_DATA cannot be used in interrupt context\n");
+               return -EBUSY;
+       }
 
        rc = __apei_exec_read_register(entry, &offset);
        if (rc)
                return rc;
-       memmove((void *)ctx->dst_base + offset,
-               (void *)ctx->src_base + offset,
-               ctx->var2);
+
+       src = ioremap(ctx->src_base + offset, ctx->var2);
+       if (!src)
+               return -ENOMEM;
+       dst = ioremap(ctx->dst_base + offset, ctx->var2);
+       if (!dst)
+               return -ENOMEM;
+
+       memmove(dst, src, ctx->var2);
+
+       iounmap(src);
+       iounmap(dst);
 
        return 0;
 }
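
MOVE_DATA now goes through ioremap() instead of assuming the ranges are linearly mapped, and bails out in interrupt context where ioremap() is not usable. Note that, as written, the second failure branch leaves src mapped; a hedged variant of that branch which avoids the leak:

        dst = ioremap(ctx->dst_base + offset, ctx->var2);
        if (!dst) {
                iounmap(src);           /* do not leak the first mapping */
                return -ENOMEM;
        }
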
@@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable);
 
 static int erst_check_table(struct acpi_table_erst *erst_tab)
 {
-       if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+       if ((erst_tab->header_length !=
+            (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
+            && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
                return -EINVAL;
        if (erst_tab->header.length < sizeof(struct acpi_table_erst))
                return -EINVAL;
index 385a6059714a72dd32256685a2606586b92d771e..0d505e59214df73dfb40b67b7d5fc45a2d48e127 100644 (file)
@@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
        struct ghes *ghes = NULL;
        int rc = -EINVAL;
 
-       generic = ghes_dev->dev.platform_data;
+       generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
        if (!generic->enabled)
                return -ENODEV;
 
index 343168d1826626c202171c3a53c5b5e4c2090a65..1a3508a7fe03f157c2e0144727bbeb5c244d2a46 100644 (file)
@@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data)
 
 static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
 {
-       struct acpi_hest_generic *generic;
        struct platform_device *ghes_dev;
        struct ghes_arr *ghes_arr = data;
        int rc;
 
        if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
                return 0;
-       generic = (struct acpi_hest_generic *)hest_hdr;
-       if (!generic->enabled)
+
+       if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
                return 0;
        ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
        if (!ghes_dev)
                return -ENOMEM;
-       ghes_dev->dev.platform_data = generic;
+
+       rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
+       if (rc)
+               goto err;
+
        rc = platform_device_add(ghes_dev);
        if (rc)
                goto err;
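
This pairs with the ghes_probe() hunk earlier: platform_device_add_data() copies sizeof(void *) bytes of &hest_hdr, i.e. the platform data now holds a pointer to the HEST entry rather than the entry itself, which is why the probe side dereferences it once:

        /* consumer side, as in the ghes.c hunk above */
        generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
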
index 8f8bd736d4ff11919656e79d2ef9442b037fafff..542e5390389120de7ecd7da4cb7bd059baa18b01 100644 (file)
@@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
        list_add_tail_rcu(&map->list, &acpi_iomaps);
        spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
 
-       return vaddr + (paddr - pg_off);
+       return map->vaddr + (paddr - map->paddr);
 err_unmap:
        iounmap(vaddr);
        return NULL;
index dc58402b0a177a4e03e8dc54e7a094804edc1d36..98417201e9ce3881257354e7c2a360ee019a4e39 100644 (file)
@@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_CYCLE_COUNT,
        POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
-       POWER_SUPPLY_PROP_CURRENT_NOW,
        POWER_SUPPLY_PROP_POWER_NOW,
        POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
        POWER_SUPPLY_PROP_ENERGY_FULL,
index 2bb28b9d91c4c2643106ed0d63069ae85799d136..f7619600270a6c6279986fed3ec6b115a59893f3 100644 (file)
@@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
 {
        printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
        acpi_osi_setup("!Windows 2006");
+       acpi_osi_setup("!Windows 2006 SP1");
+       acpi_osi_setup("!Windows 2006 SP2");
        return 0;
 }
 static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
@@ -226,6 +228,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                },
        },
        {
+       .callback = dmi_disable_osi_vista,
+       .ident = "Toshiba Satellite L355",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+               },
+       },
+       {
        .callback = dmi_disable_osi_win7,
        .ident = "ASUS K50IJ",
        .matches = {
@@ -233,6 +243,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
                },
        },
+       {
+       .callback = dmi_disable_osi_vista,
+       .ident = "Toshiba P305D",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
index 5c221ab535d5b0a459516a16034f80a94a7d1bb4..310e3b9749cbbacdabb3c03d288a6b42874aa41b 100644 (file)
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir);
 static int set_power_nocheck(const struct dmi_system_id *id)
 {
        printk(KERN_NOTICE PREFIX "%s detected - "
-               "disable power check in power transistion\n", id->ident);
+               "disable power check in power transition\n", id->ident);
        acpi_power_nocheck = 1;
        return 0;
 }
@@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
 
 static struct dmi_system_id dsdt_dmi_table[] __initdata = {
        /*
-        * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
+        * Invoke the DSDT corruption work-around on all Toshiba Satellite machines.
         * https://bugzilla.kernel.org/show_bug.cgi?id=14679
         */
        {
         .callback = set_copy_dsdt,
-        .ident = "TOSHIBA Satellite A505",
+        .ident = "TOSHIBA Satellite",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
-               },
-       },
-       {
-        .callback = set_copy_dsdt,
-        .ident = "TOSHIBA Satellite L505D",
-        .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
                },
        },
        {}
@@ -1027,7 +1019,7 @@ static int __init acpi_init(void)
 
        /*
         * If the laptop falls into the DMI check table, the power state check
-        * will be disabled in the course of device power transistion.
+        * will be disabled in the course of device power transition.
         */
        dmi_check_system(power_nocheck_dmi_table);
 
index 8a3b840c0bb268d0580cd5550c41986bf3c09662..d94d2953c9740f34675eeb576e00e74ee621bfd2 100644 (file)
@@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void)
 
        acpi_bus_unregister_driver(&acpi_fan_driver);
 
+#ifdef CONFIG_ACPI_PROCFS
        remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
+#endif
 
        return;
 }
index e9699aaed1092874b0f275d2ca86a8142850db1c..b618f888d66b48f91365e1e4d8ef0b4d67dca81a 100644 (file)
@@ -28,12 +28,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
 }
 
 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
-       {
-       set_no_mwait, "IFL91 board", {
-       DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
-       DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
-       DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
-       DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
index 15602189238942cc9db03b9d9f25c18823128ea0..347eb21b235302d44d0c5e2d768ce3a8870142f2 100644 (file)
@@ -850,7 +850,7 @@ static int __init acpi_processor_init(void)
                printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
                        acpi_idle_driver.name);
        } else {
-               printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+               printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
                        cpuidle_get_driver()->name);
        }
 
index ba1bd263d903094692c3683c9557e8b919d1d985..3a73a93596e88a29c1e66fabec91dd2d0db96f6c 100644 (file)
@@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module)
        if (!try_module_get(calling_module))
                return -EINVAL;
 
-       /* is_done is set to negative if an error occured,
-        * and to postitive if _no_ error occured, but SMM
+       /* is_done is set to negative if an error occurred,
+        * and to positive if _no_ error occurred, but SMM
         * was already notified. This avoids double notification
         * which might lead to unexpected results...
         */
index cf82989ae7568c3c54ded69d16829a2851119d80..4754ff6e70e6daa25b13df6f115745eb7bfb7e55 100644 (file)
@@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init init_nvs_nosave(const struct dmi_system_id *d)
+{
+       acpi_nvs_nosave();
+       return 0;
+}
+
 static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        {
        .callback = init_old_suspend_ordering,
@@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
                DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
                },
        },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Sony Vaio VGN-SR11M",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+               },
+       },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Everex StepNote Series",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+               },
+       },
        {},
 };
 #endif /* CONFIG_SUSPEND */
index 68e2e4582fa2f18968058c8125704a23f151cb62..f8588f81048ac989d6af1d727693a234f54bc27b 100644 (file)
@@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
        ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
 };
 
-static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
+static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
 {
        int result = 0;
        int i;
@@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
        return result;
 }
 
-static int param_get_debug_level(char *buffer, struct kernel_param *kp)
+static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
 {
        int result = 0;
        int i;
@@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp)
        return result;
 }
 
-module_param_call(debug_layer, param_set_uint, param_get_debug_layer,
-                 &acpi_dbg_layer, 0644);
-module_param_call(debug_level, param_set_uint, param_get_debug_level,
-                 &acpi_dbg_level, 0644);
+static struct kernel_param_ops param_ops_debug_layer = {
+       .set = param_set_uint,
+       .get = param_get_debug_layer,
+};
+
+static struct kernel_param_ops param_ops_debug_level = {
+       .set = param_set_uint,
+       .get = param_get_debug_level,
+};
+
+module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
+module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
 
 static char trace_method_name[6];
 module_param_string(trace_method_name, trace_method_name, 6, 0644);
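The hunk above replaces the older module_param_call() helper with module_param_cb() plus a kernel_param_ops structure, pairing the stock uint setter with a custom formatter. A minimal sketch of the same pattern for a hypothetical parameter (the names example_mask and param_ops_example_mask are not from this commit):

#include <linux/moduleparam.h>

static unsigned int example_mask;

static int param_get_example_mask(char *buffer, const struct kernel_param *kp)
{
	/* custom getter: print the value as hex; the setter reuses the stock uint parser */
	return sprintf(buffer, "0x%08x", example_mask);
}

static struct kernel_param_ops param_ops_example_mask = {
	.set = param_set_uint,
	.get = param_get_example_mask,
};

module_param_cb(example_mask, &param_ops_example_mask, &example_mask, 0644);
MODULE_PARM_DESC(example_mask, "Example bitmask exposed via kernel_param_ops");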
index c5fef01b3c9591701fb03ae86290a6b5b643c32a..b836761265988590cea46dbcffc39d30ea3c5578 100644 (file)
@@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
                                  "support\n"));
                *cap |= ACPI_VIDEO_BACKLIGHT;
                if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
-                       printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness "
-                                       "control misses _BQC function\n");
+                       printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
+                               "cannot determine initial brightness\n");
                /* We have backlight support, no need to scan further */
                return AE_CTRL_TERMINATE;
        }
index ff1c945fba98e39de55b42ce46a5e9af26b34c45..99d0e5a511482215a7b3c59ffaed9f0827cfa2b1 100644 (file)
@@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int ahci_pci_device_resume(struct pci_dev *pdev);
 #endif
 
+static struct scsi_host_template ahci_sht = {
+       AHCI_SHT("ahci"),
+};
+
 static struct ata_port_operations ahci_vt8251_ops = {
        .inherits               = &ahci_ops,
        .hardreset              = ahci_vt8251_hardreset,
index 474427b6f99f47a8dd997042cc9f2d271a743228..e5fdeebf9ef0610d6f43999baefc4fd06fcd354b 100644 (file)
@@ -298,7 +298,17 @@ struct ahci_host_priv {
 
 extern int ahci_ignore_sss;
 
-extern struct scsi_host_template ahci_sht;
+extern struct device_attribute *ahci_shost_attrs[];
+extern struct device_attribute *ahci_sdev_attrs[];
+
+#define AHCI_SHT(drv_name)                                             \
+       ATA_NCQ_SHT(drv_name),                                          \
+       .can_queue              = AHCI_MAX_CMDS - 1,                    \
+       .sg_tablesize           = AHCI_MAX_SG,                          \
+       .dma_boundary           = AHCI_DMA_BOUNDARY,                    \
+       .shost_attrs            = ahci_shost_attrs,                     \
+       .sdev_attrs             = ahci_sdev_attrs
+
 extern struct ata_port_operations ahci_ops;
 
 void ahci_save_initial_config(struct device *dev,
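With the shared template turned into the AHCI_SHT() macro above, each AHCI front end (the PCI driver and the new platform driver in the following hunks) instantiates its own scsi_host_template under its own driver name instead of exporting a single ahci_sht. A minimal sketch of how a hypothetical front end would now declare its template (the driver name is illustrative):

#include "ahci.h"

/* hypothetical front-end driver using the shared macro */
static struct scsi_host_template example_ahci_sht = {
	AHCI_SHT("example_ahci"),
};

/* later handed to ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, &example_ahci_sht) */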
index 4e97f33cca4406212b9769c6c13fc11afc02bc4a..84b643270e7af5c49d8a73db78f6e6230ceabf7b 100644 (file)
 #include <linux/ahci_platform.h>
 #include "ahci.h"
 
+static struct scsi_host_template ahci_platform_sht = {
+       AHCI_SHT("ahci_platform"),
+};
+
 static int __init ahci_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
        ahci_print_info(host, "platform");
 
        rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
-                              &ahci_sht);
+                              &ahci_platform_sht);
        if (rc)
                goto err0;
 
index 68dc6785472f86932f769029ae6046a0dffeec47..8eea309ea21231fcb50ff0498a366ff8a8a1dcf7 100644 (file)
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
                   ahci_read_em_buffer, ahci_store_em_buffer);
 
-static struct device_attribute *ahci_shost_attrs[] = {
+struct device_attribute *ahci_shost_attrs[] = {
        &dev_attr_link_power_management_policy,
        &dev_attr_em_message_type,
        &dev_attr_em_message,
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
        &dev_attr_em_buffer,
        NULL
 };
+EXPORT_SYMBOL_GPL(ahci_shost_attrs);
 
-static struct device_attribute *ahci_sdev_attrs[] = {
+struct device_attribute *ahci_sdev_attrs[] = {
        &dev_attr_sw_activity,
        &dev_attr_unload_heads,
        NULL
 };
-
-struct scsi_host_template ahci_sht = {
-       ATA_NCQ_SHT("ahci"),
-       .can_queue              = AHCI_MAX_CMDS - 1,
-       .sg_tablesize           = AHCI_MAX_SG,
-       .dma_boundary           = AHCI_DMA_BOUNDARY,
-       .shost_attrs            = ahci_shost_attrs,
-       .sdev_attrs             = ahci_sdev_attrs,
-};
-EXPORT_SYMBOL_GPL(ahci_sht);
+EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
 
 struct ata_port_operations ahci_ops = {
        .inherits               = &sata_pmp_port_ops,
index b1cbeb59bb7622e61f75bc58f373cadf1e822218..37a2bb5950761fcfa6cd5ea16f30b5c74dc51a1e 100644 (file)
@@ -2369,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
        pkt_shrink_pktlist(pd);
 }
 
-static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
 {
        if (dev_minor >= MAX_WRITERS)
                return NULL;
index 3822b4f49c84a360145a085e329fe0b76df4d47c..7bd7c45b53efc849225f7f0a604c41f2f0a4fded 100644 (file)
@@ -305,6 +305,9 @@ static int num_force_kipmid;
 #ifdef CONFIG_PCI
 static int pci_registered;
 #endif
+#ifdef CONFIG_ACPI
+static int pnp_registered;
+#endif
 #ifdef CONFIG_PPC_OF
 static int of_registered;
 #endif
@@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
 {
        struct acpi_device *acpi_dev;
        struct smi_info *info;
-       struct resource *res;
+       struct resource *res, *res_second;
        acpi_handle handle;
        acpi_status status;
        unsigned long long tmp;
@@ -2182,13 +2185,13 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
        info->io.addr_data = res->start;
 
        info->io.regspacing = DEFAULT_REGSPACING;
-       res = pnp_get_resource(dev,
+       res_second = pnp_get_resource(dev,
                               (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
                                        IORESOURCE_IO : IORESOURCE_MEM,
                               1);
-       if (res) {
-               if (res->start > info->io.addr_data)
-                       info->io.regspacing = res->start - info->io.addr_data;
+       if (res_second) {
+               if (res_second->start > info->io.addr_data)
+                       info->io.regspacing = res_second->start - info->io.addr_data;
        }
        info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = 0;
@@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void)
 
 #ifdef CONFIG_ACPI
        pnp_register_driver(&ipmi_pnp_driver);
+       pnp_registered = 1;
 #endif
 
 #ifdef CONFIG_DMI
@@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void)
                pci_unregister_driver(&ipmi_pci_driver);
 #endif
 #ifdef CONFIG_ACPI
-       pnp_unregister_driver(&ipmi_pnp_driver);
+       if (pnp_registered)
+               pnp_unregister_driver(&ipmi_pnp_driver);
 #endif
 
 #ifdef CONFIG_PPC_OF
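The new pnp_registered flag mirrors the pci_registered/of_registered pattern already used in this file: the init path records that a registration actually happened, and the cleanup path only unregisters when the flag is set, so a partially initialized module cannot unregister something it never registered. A compressed sketch of the idiom with placeholder names (error handling elided):

static struct pnp_driver example_pnp_driver = { .name = "example" };
static int example_pnp_registered;

static int __init example_init(void)
{
#ifdef CONFIG_ACPI
	pnp_register_driver(&example_pnp_driver);
	example_pnp_registered = 1;
#endif
	return 0;
}

static void __exit example_exit(void)
{
#ifdef CONFIG_ACPI
	if (example_pnp_registered)
		pnp_unregister_driver(&example_pnp_driver);
#endif
}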
index c2408bbe9c2eed3521f4eb21a86c1e9671774534..f508690eb95859ef80e217f68db827daba606f29 100644 (file)
@@ -80,7 +80,7 @@
  * Limiting Performance Impact
  * ---------------------------
  * C states, especially those with large exit latencies, can have a real
- * noticable impact on workloads, which is not acceptable for most sysadmins,
+ * noticeable impact on workloads, which is not acceptable for most sysadmins,
  * and in addition, less performance has a power price of its own.
  *
  * As a general rule of thumb, menu assumes that the following heuristic
index 86c5ae9fde34d3cf0f3f372a33739ccb4286fdf3..411d5bf50fc43cab437dff34d3d9f25dd9928fe0 100644 (file)
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
 
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
-       u32 val = (1 << (1 + (chan->idx * 16)));
+       u32 val = ~(1 << (chan->idx * 16));
        dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
        __raw_writel(val, XOR_INTR_CAUSE(chan));
 }
index fb64cf36ba61d0e786ecfeb802f43909ade4f2f2..eb6b54dbb8064a9a5d2e71eb3261132195ff6f8d 100644 (file)
@@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 
        sh_chan = to_sh_chan(chan);
        param = chan->private;
-       slave_addr = param->config->addr;
 
        /* Someone calling slave DMA on a public channel? */
        if (!param || !sg_len) {
@@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
                return NULL;
        }
 
+       slave_addr = param->config->addr;
+
        /*
         * if (param != NULL), this is a successfully requested slave channel,
         * therefore param->config != NULL too.
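The fix above is purely an ordering one: param->config must not be dereferenced until param itself has been checked. The general shape, sketched with placeholder types and names:

struct example_config {
	unsigned long addr;
};

struct example_slave {
	struct example_config *config;
};

static int example_prep(struct example_slave *param)
{
	unsigned long slave_addr;

	if (!param)				/* validate the pointer first ... */
		return -EINVAL;

	slave_addr = param->config->addr;	/* ... and only then dereference it */
	return slave_addr ? 0 : -EINVAL;
}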
index 3630308e7b811a66f398193eae71cc12f5d7383f..6b21e25f7a84cc99ad6ea710b788745992d1170f 100644 (file)
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
 {
        int status;
 
+       if (mci->op_state != OP_RUNNING_POLL)
+               return;
+
        status = cancel_delayed_work(&mci->work);
        if (status == 0) {
                debugf0("%s() not canceled, flush the queue\n",
index e0187d16dd7c53fd240b58d62005e4c17df14bc4..0fd5b85a0f756745bd1074ae673e89d6c81a237a 100644 (file)
@@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
        ATTR_COUNTER(0),
        ATTR_COUNTER(1),
        ATTR_COUNTER(2),
+       { .attr = { .name = NULL } }
 };
 
 static struct mcidev_sysfs_group i7core_udimm_counters = {
index 55d03ed050006c3de3a2b7470c4ededeafbc8871..529a0dbe9fc65960e62bc7840320754fb61393e8 100644 (file)
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
  *   user_data: A pointer the data that is copied to the buffer.
  *   size: The Number of bytes to copy.
  */
-extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
-               void __user *user_data, int size)
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+                             void __user *user_data, int size)
 {
        int nr_pages = size / PAGE_SIZE + 1;
        int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
 {
        int idx = drm_buffer_index(buf);
        int page = drm_buffer_page(buf);
-       void *obj = 0;
+       void *obj = NULL;
 
        if (idx + objsize <= PAGE_SIZE) {
                obj = &buf->data[page][idx];
index bf92d07510df740d856c173e7dcce292659fe6f7..5663d2719063de9231ca6cc153b63b30422e17aa 100644 (file)
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
                return -ENOMEM;
 
        kref_init(&obj->refcount);
-       kref_init(&obj->handlecount);
+       atomic_set(&obj->handle_count, 0);
        obj->size = size;
 
        atomic_inc(&dev->object_count);
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-/**
- * Called after the last reference to the object has been lost.
- * Must be called without holding struct_mutex
- *
- * Frees the object
- */
-void
-drm_gem_object_free_unlocked(struct kref *kref)
-{
-       struct drm_gem_object *obj = (struct drm_gem_object *) kref;
-       struct drm_device *dev = obj->dev;
-
-       if (dev->driver->gem_free_object_unlocked != NULL)
-               dev->driver->gem_free_object_unlocked(obj);
-       else if (dev->driver->gem_free_object != NULL) {
-               mutex_lock(&dev->struct_mutex);
-               dev->driver->gem_free_object(obj);
-               mutex_unlock(&dev->struct_mutex);
-       }
-}
-EXPORT_SYMBOL(drm_gem_object_free_unlocked);
-
 static void drm_gem_object_ref_bug(struct kref *list_kref)
 {
        BUG();
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
  * called before drm_gem_object_free or we'll be touching
  * freed memory
  */
-void
-drm_gem_object_handle_free(struct kref *kref)
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
 {
-       struct drm_gem_object *obj = container_of(kref,
-                                                 struct drm_gem_object,
-                                                 handlecount);
        struct drm_device *dev = obj->dev;
 
        /* Remove any name for this object */
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
        struct drm_gem_object *obj = vma->vm_private_data;
 
        drm_gem_object_reference(obj);
+
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_open_locked(vma);
+       mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
 
-       drm_gem_object_unreference_unlocked(obj);
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_close_locked(vma);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
index 2ef2c78272434dcb6b32dc17fd96e34dd7a8d959..974e970ce3f81ce014170b90ad1b8adc8a1dd5a9 100644 (file)
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
 
        seq_printf(m, "%6d %8zd %7d %8d\n",
                   obj->name, obj->size,
-                  atomic_read(&obj->handlecount.refcount),
+                  atomic_read(&obj->handle_count),
                   atomic_read(&obj->refcount.refcount));
        return 0;
 }
index fda67468e603b6169393b92bc4922afef8b4d8ce..5df450683aab8649511aaa96aaa759452b022fc0 100644 (file)
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
        mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct vm_area_struct *vma)
 {
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);
 
-       mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
                        break;
                }
        }
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       drm_vm_close_locked(vma);
        mutex_unlock(&dev->struct_mutex);
 }
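Splitting drm_vm_close() into a locked core (drm_vm_close_locked()) plus a thin wrapper that takes struct_mutex is what lets drm_gem_vm_close() above do the vma unlink and the GEM unreference under a single lock acquisition. The wrapper pattern in general, sketched with hypothetical names:

#include <linux/mutex.h>
#include <linux/list.h>

struct example_dev {
	struct mutex lock;
	struct list_head entry;
};

/* core work; the caller must already hold dev->lock */
static void example_teardown_locked(struct example_dev *dev)
{
	list_del(&dev->entry);
}

/* public entry point: take the lock, then reuse the locked core */
static void example_teardown(struct example_dev *dev)
{
	mutex_lock(&dev->lock);
	example_teardown_locked(dev);
	mutex_unlock(&dev->lock);
}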
 
index 61b4caf220fa83bd15815ea0f82b627f2d773727..fb07e73581e84467ab59ebe744e21ff6f712d2ce 100644 (file)
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i810_buffer_fops = {
        .open = drm_open,
        .release = drm_release,
-       .unlocked_ioctl = drm_ioctl,
+       .unlocked_ioctl = i810_ioctl,
        .mmap = i810_mmap_buffers,
        .fasync = drm_fasync,
 };
index 671aa18415ac52d17164e79b4c2a9f287b02da0d..cc92c7e6236fbdffb86078290b5b93dffad2cbad 100644 (file)
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i830_buffer_fops = {
        .open = drm_open,
        .release = drm_release,
-       .unlocked_ioctl = drm_ioctl,
+       .unlocked_ioctl = i830_ioctl,
        .mmap = i830_mmap_buffers,
        .fasync = drm_fasync,
 };
index 9d67b485303005771a090ea7a3c1e1c6f8b74e9e..c74e4e8006d4b28e02ae9fb44ee11f491e130775 100644 (file)
@@ -1787,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
                }
        }
 
-       div_u64(diff, diff1);
+       diff = div_u64(diff, diff1);
        ret = ((m * diff) + c);
-       div_u64(ret, 10);
+       ret = div_u64(ret, 10);
 
        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;
@@ -1858,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
        /* More magic constants... */
        diff = diff * 1181;
-       div_u64(diff, diffms * 10);
+       diff = div_u64(diff, diffms * 10);
        dev_priv->gfx_power = diff;
 }
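The three hunks above fix the same mistake: div_u64() returns the quotient rather than dividing its first argument in place, so the result has to be assigned back. A one-line reminder of the calling convention, wrapped in a hypothetical helper:

#include <linux/math64.h>

static u64 example_average_ns(u64 total_ns, u32 samples)
{
	/* div_u64() returns the quotient; total_ns itself is left untouched */
	return div_u64(total_ns, samples);
}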
 
index cf4ffbee1c00633a809a624a165a359082386975..90b1d6753b9d493d3ed8d2c45153bf2047b54d8f 100644 (file)
@@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file_priv, obj, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(obj);
        if (ret) {
-               drm_gem_object_unreference_unlocked(obj);
                return ret;
        }
 
-       /* Sink the floating reference from kref_init(handlecount) */
-       drm_gem_object_handle_unreference_unlocked(obj);
-
        args->handle = handle;
        return 0;
 }
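This hunk (and the matching nouveau and radeon hunks later in the series) switches to a simpler ownership rule: the object is created holding one reference, drm_gem_handle_create() takes its own reference for the handle, and the creator's reference is dropped unconditionally right afterwards, whether or not handle creation succeeded. A sketch of the resulting ioctl shape; example_gem_alloc and struct drm_example_create are hypothetical stand-ins:

static int example_gem_create_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_example_create *args = data;	/* hypothetical args struct */
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	obj = example_gem_alloc(dev, args->size);	/* hypothetical: returns obj with refcount 1 */
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	/* drop the allocation reference; on success the handle now holds the object */
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}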
@@ -471,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
 
-       /* Bounds check source.
-        *
-        * XXX: This could use review for overflow issues...
-        */
-       if (args->offset > obj->size || args->size > obj->size ||
-           args->offset + args->size > obj->size) {
-               drm_gem_object_unreference_unlocked(obj);
-               return -EINVAL;
+       /* Bounds check source.  */
+       if (args->offset > obj->size || args->size > obj->size - args->offset) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (!access_ok(VERIFY_WRITE,
+                      (char __user *)(uintptr_t)args->data_ptr,
+                      args->size)) {
+               ret = -EFAULT;
+               goto err;
        }
 
        if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -490,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                                                        file_priv);
        }
 
+err:
        drm_gem_object_unreference_unlocked(obj);
-
        return ret;
 }
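The rewritten bounds check avoids integer overflow: args->offset + args->size can wrap around and slip past the old comparison, whereas subtracting the already-validated offset from the object size cannot. The same idea in isolation, with placeholder names:

#include <linux/types.h>

/* returns true when [offset, offset + len) lies inside an object of obj_size bytes */
static inline bool example_range_ok(u64 offset, u64 len, u64 obj_size)
{
	if (offset > obj_size || len > obj_size)
		return false;
	/* safe: obj_size - offset cannot underflow because offset <= obj_size here */
	return len <= obj_size - offset;
}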
 
@@ -580,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
-       if (!access_ok(VERIFY_READ, user_data, remain))
-               return -EFAULT;
 
 
        mutex_lock(&dev->struct_mutex);
@@ -934,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
 
-       /* Bounds check destination.
-        *
-        * XXX: This could use review for overflow issues...
-        */
-       if (args->offset > obj->size || args->size > obj->size ||
-           args->offset + args->size > obj->size) {
-               drm_gem_object_unreference_unlocked(obj);
-               return -EINVAL;
+       /* Bounds check destination. */
+       if (args->offset > obj->size || args->size > obj->size - args->offset) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (!access_ok(VERIFY_READ,
+                      (char __user *)(uintptr_t)args->data_ptr,
+                      args->size)) {
+               ret = -EFAULT;
+               goto err;
        }
 
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -975,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
+err:
        drm_gem_object_unreference_unlocked(obj);
-
        return ret;
 }
 
@@ -2400,7 +2402,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
                I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
                break;
        case 3:
-               if (obj_priv->fence_reg > 8)
+               if (obj_priv->fence_reg >= 8)
                        fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
                else
        case 2:
@@ -3258,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                  (int) reloc->offset,
                                  reloc->read_domains,
                                  reloc->write_domain);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
                if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
index e85246ef691ce339ab3ba331c30a6e846b7ead36..5c428fa3e0b34049e94786184b646a98ee87c06d 100644 (file)
@@ -93,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
-       struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+       struct drm_i915_gem_object *obj_priv;
        struct list_head *render_iter, *bsd_iter;
        int ret = 0;
 
@@ -175,39 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
        return -ENOSPC;
 
 found:
+       /* drm_mm doesn't allow any other operations while
+        * scanning, therefore store to be evicted objects on a
+        * temporary list. */
        INIT_LIST_HEAD(&eviction_list);
-       list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-                                &unwind_list, evict_list) {
+       while (!list_empty(&unwind_list)) {
+               obj_priv = list_first_entry(&unwind_list,
+                                           struct drm_i915_gem_object,
+                                           evict_list);
                if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-                       /* drm_mm doesn't allow any other other operations while
-                        * scanning, therefore store to be evicted objects on a
-                        * temporary list. */
                        list_move(&obj_priv->evict_list, &eviction_list);
-               } else
-                       drm_gem_object_unreference(&obj_priv->base);
+                       continue;
+               }
+               list_del(&obj_priv->evict_list);
+               drm_gem_object_unreference(&obj_priv->base);
        }
 
        /* Unbinding will emit any required flushes */
-       list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-                                &eviction_list, evict_list) {
-#if WATCH_LRU
-               DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
-#endif
-               ret = i915_gem_object_unbind(&obj_priv->base);
-               if (ret)
-                       return ret;
-
+       while (!list_empty(&eviction_list)) {
+               obj_priv = list_first_entry(&eviction_list,
+                                           struct drm_i915_gem_object,
+                                           evict_list);
+               if (ret == 0)
+                       ret = i915_gem_object_unbind(&obj_priv->base);
+               list_del(&obj_priv->evict_list);
                drm_gem_object_unreference(&obj_priv->base);
        }
 
-       /* The just created free hole should be on the top of the free stack
-        * maintained by drm_mm, so this BUG_ON actually executes in O(1).
-        * Furthermore all accessed data has just recently been used, so it
-        * should be really fast, too. */
-       BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
-                                  alignment, 0));
-
-       return 0;
+       return ret;
 }
 
 int
index b5bf51a4502dc4f4e2ca4d2914673004c931c3b5..979228594599a28ac7737762679f1c97fd5981bf 100644 (file)
@@ -1013,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
                DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
-/**
- * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
  * @dev: drm device
  * @pipe: pipe to wait for
  *
@@ -1022,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
  * spinning on the vblank interrupt status bit, since we won't actually
  * see an interrupt when the pipe is disabled.
  *
- * So this function waits for the display line value to settle (it
- * usually ends up stopping at the start of the next frame).
+ * On Gen4 and above:
+ *   wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ *   wait for the display line value to settle (it usually
+ *   ends up stopping at the start of the next frame).
+ *  
  */
-void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
-       unsigned long timeout = jiffies + msecs_to_jiffies(100);
-       u32 last_line;
-
-       /* Wait for the display line to settle */
-       do {
-               last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
-               mdelay(5);
-       } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
-                time_after(timeout, jiffies));
-
-       if (time_after(jiffies, timeout))
-               DRM_DEBUG_KMS("vblank wait timed out\n");
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
+
+               /* Wait for the Pipe State to go off */
+               if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
+                            100, 0))
+                       DRM_DEBUG_KMS("pipe_off wait timed out\n");
+       } else {
+               u32 last_line;
+               int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+               unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+               /* Wait for the display line to settle */
+               do {
+                       last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+                       mdelay(5);
+               } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+                        time_after(timeout, jiffies));
+               if (time_after(jiffies, timeout))
+                       DRM_DEBUG_KMS("pipe_off wait timed out\n");
+       }
 }
 
 /* Parameters have changed, update FBC info */
@@ -2328,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_READ(dspbase_reg);
                }
 
-               /* Wait for vblank for the disable to take effect */
-               intel_wait_for_vblank_off(dev, pipe);
-
                /* Don't disable pipe A or pipe A PLLs if needed */
                if (pipeconf_reg == PIPEACONF &&
-                   (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+                   (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
+                       /* Wait for vblank for the disable to take effect */
+                       intel_wait_for_vblank(dev, pipe);
                        goto skip_pipe_off;
+               }
 
                /* Next, disable display pipes */
                temp = I915_READ(pipeconf_reg);
@@ -2343,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_READ(pipeconf_reg);
                }
 
-               /* Wait for vblank for the disable to take effect. */
-               intel_wait_for_vblank_off(dev, pipe);
+               /* Wait for the pipe to turn off */
+               intel_wait_for_pipe_off(dev, pipe);
 
                temp = I915_READ(dpll_reg);
                if ((temp & DPLL_VCO_ENABLE) != 0) {
index 1a51ee07de3e72daf3a3c59ffdb15f70d5c95959..9ab8708ac6ba1370cea75680d6a660daa5f9b147 100644 (file)
@@ -1138,18 +1138,14 @@ static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t dp_reg_value,
                        uint8_t dp_train_pat,
-                       uint8_t train_set[4],
-                       bool first)
+                       uint8_t train_set[4])
 {
        struct drm_device *dev = intel_dp->base.enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
        int ret;
 
        I915_WRITE(intel_dp->output_reg, dp_reg_value);
        POSTING_READ(intel_dp->output_reg);
-       if (first)
-               intel_wait_for_vblank(dev, intel_crtc->pipe);
 
        intel_dp_aux_native_write_1(intel_dp,
                                    DP_TRAINING_PATTERN_SET,
@@ -1174,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp)
        uint8_t voltage;
        bool clock_recovery = false;
        bool channel_eq = false;
-       bool first = true;
        int tries;
        u32 reg;
        uint32_t DP = intel_dp->DP;
+       struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+
+       /* Enable output, wait for it to become active */
+       I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+       POSTING_READ(intel_dp->output_reg);
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
 
        /* Write the link configuration data */
        intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1210,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp)
                        reg = DP | DP_LINK_TRAIN_PAT_1;
 
                if (!intel_dp_set_link_train(intel_dp, reg,
-                                            DP_TRAINING_PATTERN_1, train_set, first))
+                                            DP_TRAINING_PATTERN_1, train_set))
                        break;
-               first = false;
                /* Set training pattern 1 */
 
                udelay(100);
@@ -1266,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
 
                /* channel eq pattern */
                if (!intel_dp_set_link_train(intel_dp, reg,
-                                            DP_TRAINING_PATTERN_2, train_set,
-                                            false))
+                                            DP_TRAINING_PATTERN_2, train_set))
                        break;
 
                udelay(400);
index ad312ca6b3e570125732168b3c2f670467264beb..8828b3ac6414eabff93134e34a41ae5c38d1cd34 100644 (file)
@@ -229,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                                    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
-extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
index 7bdc96256bf55b6e87d102377b428a871792be91..56ad9df2ccb58925bdd74a2f3b64bac01360f562 100644 (file)
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev,
        drm_fb_helper_fini(&ifbdev->helper);
 
        drm_framebuffer_cleanup(&ifb->base);
-       if (ifb->obj)
+       if (ifb->obj) {
+               drm_gem_object_handle_unreference(ifb->obj);
                drm_gem_object_unreference(ifb->obj);
+       }
 
        return 0;
 }
index e8e902d614edc8431cd4f17b5650a4682c7b0917..ee73e428a84a800dd8d70a1747457033f7e8da8e 100644 (file)
@@ -2170,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
         return true;
 
 err:
-       intel_sdvo_destroy_enhance_property(connector);
-       kfree(intel_sdvo_connector);
+       intel_sdvo_destroy(connector);
        return false;
 }
 
@@ -2243,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
        return true;
 
 err:
-       intel_sdvo_destroy_enhance_property(connector);
-       kfree(intel_sdvo_connector);
+       intel_sdvo_destroy(connector);
        return false;
 }
 
@@ -2522,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
                uint16_t response;
        } enhancements;
 
-       if (!intel_sdvo_get_value(intel_sdvo,
-                                 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
-                                 &enhancements, sizeof(enhancements)))
-               return false;
-
+       enhancements.response = 0;
+       intel_sdvo_get_value(intel_sdvo,
+                            SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+                            &enhancements, sizeof(enhancements));
        if (enhancements.response == 0) {
                DRM_DEBUG_KMS("No enhancement is supported\n");
                return true;
index 87186a4bbf03da1a964e95cbb17a7a53c3b1a557..fc737037f751c3690dfb09239e3439df1fa4191c 100644 (file)
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
        if (nv_encoder->dcb->type == OUTPUT_LVDS &&
            (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
             dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
-               nv_connector->native_mode = drm_mode_create(dev);
-               nouveau_bios_fp_mode(dev, nv_connector->native_mode);
+               struct drm_display_mode mode;
+
+               nouveau_bios_fp_mode(dev, &mode);
+               nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
        }
 
        /* Find the native mode if this is a digital panel, if we didn't
index dbd30b2e43fd13b39aaa59eb0abc4fb0332bd1ee..d2047713dc59318fe94aff82eccf98bd28dd6841 100644 (file)
@@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 
        if (nouveau_fb->nvbo) {
                nouveau_bo_unmap(nouveau_fb->nvbo);
+               drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
                drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
                nouveau_fb->nvbo = NULL;
        }
index ead7b8fc53fcbcd473dbdc7a97d893a3e2e9c454..19620a6709f55c00e97efd5d2f816705788420f8 100644 (file)
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                goto out;
 
        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(nvbo->gem);
 out:
-       drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-
-       if (ret)
-               drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
 }
 
index 3ec181ff50cea7a45b22a28a6ed43857b2a841cb..3c9964a8fbad00eb4a8371f94900432521f1367f 100644 (file)
@@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
        mutex_lock(&dev->struct_mutex);
        nouveau_bo_unpin(chan->notifier_bo);
        mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
        drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
        drm_mm_takedown(&chan->notifier_heap);
 }
index 1bc72c3190a9dcfe9b8f70f16087a231560d9b08..fe359a239df343437cce0b0c79d2692c559ffb9e 100644 (file)
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
 #define SW_I2C_CNTL_WRITE1BIT 6
 
 //==============================VESA definition Portion===============================
-#define VESA_OEM_PRODUCT_REV                               '01.00'
+#define VESA_OEM_PRODUCT_REV                               "01.00"
 #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT            0xBB       //refer to VBE spec p.32, no TTY support
 #define VESA_MODE_WIN_ATTRIBUTE                                                     7
 #define VESA_WIN_SIZE                                                                                       64
index afc18d87fdca7409e4c7462fe3a1b03eeaa6d3ca..7a04959ba0eefacad6ab9894cb1d5732bbf4677e 100644 (file)
@@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev)
        if (i < rdev->usec_timeout) {
                DRM_INFO("ib test succeeded in %u usecs\n", i);
        } else {
-               DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+               DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
                r = -EINVAL;
        }
@@ -3528,7 +3528,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
        /* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
         * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
         */
-       if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+       if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+           rdev->vram_scratch.ptr) {
                void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
                u32 tmp;
 
index ebae14c4b768b4413990e84c1055782a72590009..68932ba7b8a47d0e7a360be37fb38f16075e4d3b 100644 (file)
@@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                        *connector_type = DRM_MODE_CONNECTOR_DVID;
        }
 
+       /* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+       if ((dev->pdev->device == 0x796e) &&
+           (dev->pdev->subsystem_vendor == 0x1462) &&
+           (dev->pdev->subsystem_device == 0x7302)) {
+               if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+                   (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+                       return false;
+       }
+
        /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
        if ((dev->pdev->device == 0x7941) &&
            (dev->pdev->subsystem_vendor == 0x147b) &&
index 127a395f70fb304d6f4d0df613de1fdfc8ca1223..b92d2f2fcbed6a8bd472ce9f4b936aa82f309278 100644 (file)
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
                                        DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
                                if (devices & ATOM_DEVICE_DFP5_SUPPORT)
                                        DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+                                       DRM_INFO("    DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
                                if (devices & ATOM_DEVICE_TV1_SUPPORT)
                                        DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
                                if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
 
-       if (radeon_fb->obj)
+       if (radeon_fb->obj) {
                drm_gem_object_unreference_unlocked(radeon_fb->obj);
+       }
        drm_framebuffer_cleanup(fb);
        kfree(radeon_fb);
 }
index c74a8b20d9413e921bc6a03cd92578146a9ee8ef..9cdf6a35bc2c3f4efbd5daab2a9506078308aa31 100644 (file)
@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
        ret = radeon_bo_reserve(rbo, false);
        if (likely(ret == 0)) {
                radeon_bo_kunmap(rbo);
+               radeon_bo_unpin(rbo);
                radeon_bo_unreserve(rbo);
        }
+       drm_gem_object_handle_unreference(gobj);
        drm_gem_object_unreference_unlocked(gobj);
 }
 
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
 {
        struct fb_info *info;
        struct radeon_framebuffer *rfb = &rfbdev->rfb;
-       struct radeon_bo *rbo;
-       int r;
 
        if (rfbdev->helper.fbdev) {
                info = rfbdev->helper.fbdev;
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
        }
 
        if (rfb->obj) {
-               rbo = rfb->obj->driver_private;
-               r = radeon_bo_reserve(rbo, false);
-               if (likely(r == 0)) {
-                       radeon_bo_kunmap(rbo);
-                       radeon_bo_unpin(rbo);
-                       radeon_bo_unreserve(rbo);
-               }
-               drm_gem_object_unreference_unlocked(rfb->obj);
+               radeonfb_destroy_pinned_object(rfb->obj);
+               rfb->obj = NULL;
        }
        drm_fb_helper_fini(&rfbdev->helper);
        drm_framebuffer_cleanup(&rfb->base);
index c578f265b24cefc6dce21734783b01c1aed1ce27..d1e595d9172396b8104d19c7a1d0a87d3b14b772 100644 (file)
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(gobj);
        if (r) {
-               drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
-       drm_gem_object_handle_unreference_unlocked(gobj);
        args->handle = handle;
        return 0;
 }
index 5eee3c41d124bf49fbd5dfbc7264fb062699e961..8fbbe1c6ebbda854f7bf9dc9f76ae1c4eefdafc5 100644 (file)
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  */
 int radeon_driver_firstopen_kms(struct drm_device *dev)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       if (rdev->powered_down)
+               return -EINVAL;
        return 0;
 }
 
index 7cffb3e0423249ec4f78f7c7cbdc72b6df921e50..3451a82adba76c31672ee96f086146f5da1ab12b 100644 (file)
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        fbo->vm_node = NULL;
+       atomic_set(&fbo->cpu_writers, 0);
 
        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        kref_init(&fbo->list_kref);
index ca904799f018a6e3ae23c80933bd1e007aec8f7f..b1e02fffd3ccdebf256d38bb55bed9a37ea1c8d7 100644 (file)
@@ -69,7 +69,7 @@ struct ttm_page_pool {
        spinlock_t              lock;
        bool                    fill_lock;
        struct list_head        list;
-       int                     gfp_flags;
+       gfp_t                   gfp_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
  * This function is reentrant if caller updates count depending on number of
  * pages returned in pages array.
  */
-static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
 {
        struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p = NULL;
-       int gfp_flags = GFP_USER;
+       gfp_t gfp_flags = GFP_USER;
        int r;
 
        /* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
        return 0;
 }
 
-void ttm_page_alloc_fini()
+void ttm_page_alloc_fini(void)
 {
        int i;
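These hunks tighten types rather than behaviour: allocation flags are carried as gfp_t (which sparse can type-check) instead of plain int, and the old-style empty parameter list becomes an explicit (void). A small sketch of the preferred declarations; the helper name is made up:

#include <linux/gfp.h>

/* gfp_t, not int, so sparse can flag accidental mixing with other flag types */
static struct page *example_get_zeroed_page(gfp_t gfp_flags)
{
	return alloc_page(gfp_flags | __GFP_ZERO);
}

/* prototype with (void): the function takes no arguments, not an unspecified list */
void example_pool_fini(void);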
 
index 72ec2e2b6e9787196ca1de65f28e4c6a0f090051..a96ed6d9d010b82cfc58ed41ec6240f99d5a9103 100644 (file)
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = {
        {0, 0, 0}
 };
 
-static char *vmw_devname = "vmwgfx";
+static int enable_fbdev;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);
 
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+
 static void vmw_print_capabilities(uint32_t capabilities)
 {
        DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 {
        int ret;
 
-       vmw_kms_save_vga(dev_priv);
-
        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 static void vmw_release_device(struct vmw_private *dev_priv)
 {
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
-       vmw_kms_restore_vga(dev_priv);
 }
 
+int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+{
+       int ret = 0;
+
+       mutex_lock(&dev_priv->release_mutex);
+       if (unlikely(dev_priv->num_3d_resources++ == 0)) {
+               ret = vmw_request_device(dev_priv);
+               if (unlikely(ret != 0))
+                       --dev_priv->num_3d_resources;
+       }
+       mutex_unlock(&dev_priv->release_mutex);
+       return ret;
+}
+
+
+void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+{
+       int32_t n3d;
+
+       mutex_lock(&dev_priv->release_mutex);
+       if (unlikely(--dev_priv->num_3d_resources == 0))
+               vmw_release_device(dev_priv);
+       n3d = (int32_t) dev_priv->num_3d_resources;
+       mutex_unlock(&dev_priv->release_mutex);
+
+       BUG_ON(n3d < 0);
+}
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
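vmw_3d_resource_inc()/dec() above implement a counted bring-up: the first user powers up the device side via vmw_request_device(), the last user releases it, and release_mutex makes the count-and-act step atomic. The pattern in miniature, with hypothetical names for the private structure and the bring-up/shutdown helpers:

#include <linux/mutex.h>

struct example_priv {
	struct mutex release_mutex;
	uint32_t num_users;
};

static int example_resource_inc(struct example_priv *priv)
{
	int ret = 0;

	mutex_lock(&priv->release_mutex);
	if (priv->num_users++ == 0) {		/* first user brings the hardware up */
		ret = example_bring_up(priv);	/* hypothetical helper */
		if (ret != 0)
			--priv->num_users;	/* undo the count on failure */
	}
	mutex_unlock(&priv->release_mutex);
	return ret;
}

static void example_resource_dec(struct example_priv *priv)
{
	mutex_lock(&priv->release_mutex);
	if (--priv->num_users == 0)		/* last user shuts it down */
		example_shut_down(priv);	/* hypothetical helper */
	mutex_unlock(&priv->release_mutex);
}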
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->last_read_sequence = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
+       mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
+       dev_priv->enable_fb = enable_fbdev;
+
        mutex_lock(&dev_priv->hw_mutex);
 
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev->dev_private = dev_priv;
 
-       if (!dev->devname)
-               dev->devname = vmw_devname;
-
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-               ret = drm_irq_install(dev);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("Failed installing irq: %d\n", ret);
-                       goto out_no_irq;
-               }
-       }
-
        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                        goto out_no_device;
                }
        }
-       ret = vmw_request_device(dev_priv);
+       ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
-               goto out_no_device;
-       vmw_kms_init(dev_priv);
+               goto out_no_kms;
        vmw_overlay_init(dev_priv);
-       vmw_fb_init(dev_priv);
+       if (dev_priv->enable_fb) {
+               ret = vmw_3d_resource_inc(dev_priv);
+               if (unlikely(ret != 0))
+                       goto out_no_fifo;
+               vmw_kms_save_vga(dev_priv);
+               vmw_fb_init(dev_priv);
+               DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
+                        "Detected device 3D availability.\n" :
+                        "Detected no device 3D availability.\n");
+       } else {
+               DRM_INFO("Delayed 3D detection since we're not "
+                        "running the device in SVGA mode yet.\n");
+       }
+
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+               ret = drm_irq_install(dev);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed installing irq: %d\n", ret);
+                       goto out_no_irq;
+               }
+       }
 
        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);
 
-       DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
-
        return 0;
 
-out_no_device:
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-               drm_irq_uninstall(dev_priv->dev);
-       if (dev->devname == vmw_devname)
-               dev->devname = NULL;
 out_no_irq:
+       if (dev_priv->enable_fb) {
+               vmw_fb_close(dev_priv);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
+out_no_fifo:
+       vmw_overlay_close(dev_priv);
+       vmw_kms_close(dev_priv);
+out_no_kms:
+       if (dev_priv->stealth)
+               pci_release_region(dev->pdev, 2);
+       else
+               pci_release_regions(dev->pdev);
+out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
 out_err4:
        iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev)
 
        unregister_pm_notifier(&dev_priv->pm_nb);
 
-       vmw_fb_close(dev_priv);
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+               drm_irq_uninstall(dev_priv->dev);
+       if (dev_priv->enable_fb) {
+               vmw_fb_close(dev_priv);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
-       vmw_release_device(dev_priv);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
 
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-               drm_irq_uninstall(dev_priv->dev);
-       if (dev->devname == vmw_devname)
-               dev->devname = NULL;
        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-               if (unlikely(ioctl->cmd != cmd)) {
+               if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev,
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;
 
+       if (!dev_priv->enable_fb) {
+               ret = vmw_3d_resource_inc(dev_priv);
+               if (unlikely(ret != 0))
+                       return ret;
+               vmw_kms_save_vga(dev_priv);
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+               mutex_unlock(&dev_priv->hw_mutex);
+       }
+
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev,
        return 0;
 
 out_no_active_lock:
-       vmw_release_device(dev_priv);
+       if (!dev_priv->enable_fb) {
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+               mutex_unlock(&dev_priv->hw_mutex);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
        return ret;
 }
 
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev,
 
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
+       if (!dev_priv->enable_fb) {
+               ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+               if (unlikely(ret != 0))
+                       DRM_ERROR("Unable to clean VRAM on master drop.\n");
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+               mutex_unlock(&dev_priv->hw_mutex);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
+
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-       vmw_fb_on(dev_priv);
+       if (dev_priv->enable_fb)
+               vmw_fb_on(dev_priv);
 }
 
 
@@ -722,6 +796,7 @@ static struct drm_driver driver = {
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
+       .get_vblank_counter = vmw_get_vblank_counter,
        .reclaim_buffers_locked = NULL,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
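
Note: the load/unload and master set/drop paths above keep vmw_3d_resource_inc() and vmw_3d_resource_dec() strictly paired, so the SVGA 3D unit is only enabled while something actually needs it. A minimal userspace sketch of that reference-counted enable/disable pattern, using stand-in names rather than the driver's real hooks:

/* minimal sketch: last-reference teardown guarded by a mutex (not driver code) */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int num_users;

static void hw_enable(void)  { puts("enable 3d");  }   /* stand-in for the SVGA enable path  */
static void hw_disable(void) { puts("disable 3d"); }   /* stand-in for the SVGA disable path */

static void res_inc(void)
{
    pthread_mutex_lock(&lock);
    if (num_users++ == 0)
        hw_enable();            /* first user brings the unit up */
    pthread_mutex_unlock(&lock);
}

static void res_dec(void)
{
    pthread_mutex_lock(&lock);
    if (--num_users == 0)
        hw_disable();           /* last user tears it down */
    pthread_mutex_unlock(&lock);
}

int main(void) { res_inc(); res_inc(); res_dec(); res_dec(); return 0; }
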
index 429f917b60bf4b30ecdfd0946f9f1f0978bd5c58..58de6393f611dd79fbbebeab81102dcf0f4abfcb 100644 (file)
@@ -277,6 +277,7 @@ struct vmw_private {
 
        bool stealth;
        bool is_opened;
+       bool enable_fb;
 
        /**
         * Master management.
@@ -285,6 +286,9 @@ struct vmw_private {
        struct vmw_master *active_master;
        struct vmw_master fbdev_master;
        struct notifier_block pm_nb;
+
+       struct mutex release_mutex;
+       uint32_t num_3d_resources;
 };
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
        return val;
 }
 
+int vmw_3d_resource_inc(struct vmw_private *dev_priv);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv);
+
 /**
  * GMR utilities - vmwgfx_gmr.c
  */
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
                        unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
  * Overlay control - vmwgfx_overlay.c
index 870967a97c15d52eb3f380323e6038d32ed6e76f..409e172f4abfe94502be96502e251b5d6b2e54c9 100644 (file)
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
        if (unlikely(ret != 0))
                goto err_unlock;
 
+       if (bo->mem.mem_type == TTM_PL_VRAM &&
+           bo->mem.mm_node->start < bo->num_pages)
+               (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+                                      false, false);
+
        ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 
        /* Could probably bug on */
index e6a1eb7ea95498f00e65d123adeae79160af8fa8..0fe31766e4cf5f11e6025e5a85f96baa5936408f 100644 (file)
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        mutex_lock(&dev_priv->hw_mutex);
        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+       dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
        vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 
        min = 4;
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
                  dev_priv->config_done_state);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  dev_priv->enable_state);
+       vmw_write(dev_priv, SVGA_REG_TRACES,
+                 dev_priv->traces_state);
 
        mutex_unlock(&dev_priv->hw_mutex);
        vmw_fence_queue_takedown(&fifo->fence_queue);
index 64d7f47df8683ef49cfbb3c83eb449026949af03..e882ba099f0c33dab30f12f7b9328b3b628ac712 100644 (file)
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
                save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
                save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+               if (i == 0 && vmw_priv->num_displays == 1 &&
+                   save->width == 0 && save->height == 0) {
+
+                       /*
+                        * It should be fairly safe to assume that these
+                        * values are uninitialized.
+                        */
+
+                       save->width = vmw_priv->vga_width - save->pos_x;
+                       save->height = vmw_priv->vga_height - save->pos_y;
+               }
        }
+
        return 0;
 }
 
@@ -984,3 +996,8 @@ out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
 }
+
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+       return 0;
+}
index 7083b1a24df35b99fe8792040ef234d6536d69a2..11cb39e3accbfa9581801095ab0398952d2313f1 100644 (file)
@@ -27,6 +27,8 @@
 
 #include "vmwgfx_kms.h"
 
+#define VMWGFX_LDU_NUM_DU 8
+
 #define vmw_crtc_to_ldu(x) \
        container_of(x, struct vmw_legacy_display_unit, base.crtc)
 #define vmw_encoder_to_ldu(x) \
@@ -536,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+       int i;
+       int ret;
+
        if (dev_priv->ldu_priv) {
                DRM_INFO("ldu system already on\n");
                return -EINVAL;
@@ -553,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 
        drm_mode_create_dirty_info_property(dev_priv->dev);
 
-       vmw_ldu_init(dev_priv, 0);
-       /* for old hardware without multimon only enable one display */
        if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-               vmw_ldu_init(dev_priv, 1);
-               vmw_ldu_init(dev_priv, 2);
-               vmw_ldu_init(dev_priv, 3);
-               vmw_ldu_init(dev_priv, 4);
-               vmw_ldu_init(dev_priv, 5);
-               vmw_ldu_init(dev_priv, 6);
-               vmw_ldu_init(dev_priv, 7);
+               for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+                       vmw_ldu_init(dev_priv, i);
+               ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
+       } else {
+               /* for old hardware without multimon only enable one display */
+               vmw_ldu_init(dev_priv, 0);
+               ret = drm_vblank_init(dev, 1);
        }
 
-       return 0;
+       return ret;
 }
 
 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+
+       drm_vblank_cleanup(dev);
        if (!dev_priv->ldu_priv)
                return -ENOSYS;
 
index 5f2d5df01e5c370acbc1be99772701626419daf5..c8c40e9979dbd21442a5d87cf6ac4cdcad08cfad 100644 (file)
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
        cmd->body.cid = cpu_to_le32(res->id);
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_3d_resource_dec(dev_priv);
 }
 
 static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
        cmd->body.cid = cpu_to_le32(res->id);
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       (void) vmw_3d_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
 }
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
        cmd->body.sid = cpu_to_le32(res->id);
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_3d_resource_dec(dev_priv);
 }
 
 void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv,
        }
 
        vmw_fifo_commit(dev_priv, submit_size);
+       (void) vmw_3d_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return 0;
 }
index b87569e96b163c04fb35790ef8c457999480e3f3..f366f968155a3ed913ce770a60ca30cbf2f97981 100644 (file)
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
        pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
 }
 
-void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
 {
        struct vga_device *vgadev;
        unsigned long flags;
index 4d4d09bdec0a7a7cb043725b2fb94dc1fff23dc7..97499d00615aacbddcf5963bcfa08b5f4733a1bb 100644 (file)
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP
 
 config SENSORS_PKGTEMP
        tristate "Intel processor package temperature sensor"
-       depends on X86 && PCI && EXPERIMENTAL
+       depends on X86 && EXPERIMENTAL
        help
          If you say yes here you get support for the package level temperature
          sensor inside your CPU. Check documentation/driver for details.
index de8111114f469ec21567a5781349bb0d7f7cbc98..a23b17a78ace8f42cb114b30f593b9019a24a982 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/pci.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
+#include <asm/smp.h>
 
 #define DRVNAME        "coretemp"
 
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
        int err;
        struct platform_device *pdev;
        struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
        struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+       /*
+        * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+        * sensors. We check this bit only, all the early CPUs
+        * without thermal sensors will be filtered out.
+        */
+       if (!cpu_has(c, X86_FEATURE_DTS)) {
+               printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+                      " has no thermal sensor.\n", c->x86_model);
+               return 0;
+       }
 
        mutex_lock(&pdev_list_mutex);
 
@@ -482,14 +492,22 @@ exit:
 
 static void coretemp_device_remove(unsigned int cpu)
 {
-       struct pdev_entry *p, *n;
+       struct pdev_entry *p;
+       unsigned int i;
+
        mutex_lock(&pdev_list_mutex);
-       list_for_each_entry_safe(p, n, &pdev_list, list) {
-               if (p->cpu == cpu) {
-                       platform_device_unregister(p->pdev);
-                       list_del(&p->list);
-                       kfree(p);
-               }
+       list_for_each_entry(p, &pdev_list, list) {
+               if (p->cpu != cpu)
+                       continue;
+
+               platform_device_unregister(p->pdev);
+               list_del(&p->list);
+               mutex_unlock(&pdev_list_mutex);
+               kfree(p);
+               for_each_cpu(i, cpu_sibling_mask(cpu))
+                       if (i != cpu && !coretemp_device_add(i))
+                               break;
+               return;
        }
        mutex_unlock(&pdev_list_mutex);
 }
@@ -527,30 +545,21 @@ static int __init coretemp_init(void)
        if (err)
                goto exit;
 
-       for_each_online_cpu(i) {
-               struct cpuinfo_x86 *c = &cpu_data(i);
-               /*
-                * CPUID.06H.EAX[0] indicates whether the CPU has thermal
-                * sensors. We check this bit only, all the early CPUs
-                * without thermal sensors will be filtered out.
-                */
-               if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
-                       coretemp_device_add(i);
-               else {
-                       printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
-                               " has no thermal sensor.\n", c->x86_model);
-               }
-       }
+       for_each_online_cpu(i)
+               coretemp_device_add(i);
+
+#ifndef CONFIG_HOTPLUG_CPU
        if (list_empty(&pdev_list)) {
                err = -ENODEV;
                goto exit_driver_unreg;
        }
+#endif
 
        register_hotcpu_notifier(&coretemp_cpu_notifier);
        return 0;
 
-exit_driver_unreg:
 #ifndef CONFIG_HOTPLUG_CPU
+exit_driver_unreg:
        platform_driver_unregister(&coretemp_driver);
 #endif
 exit:
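
Note: coretemp_device_add() now tests the sensor capability directly with cpu_has(c, X86_FEATURE_DTS); the underlying bit is CPUID leaf 06H, EAX bit 0. The same bit can be probed from userspace with GCC's <cpuid.h>, which is all this small standalone check does (it is not kernel code):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* CPUID.06H:EAX[0] -- digital thermal sensor capability */
    if (!__get_cpuid(0x06, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 0x06 not supported");
        return 1;
    }
    printf("thermal sensor: %s\n", (eax & 1) ? "present" : "absent");
    return 0;
}
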
index 537841ef44b99d179318f7510dbf28dddedb0ed8..75afb3b0e0763c184a1b22cdc163ef32d10d2969 100644 (file)
@@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev;
 /* Super-I/O Function prototypes */
 static inline int superio_inb(int base, int reg);
 static inline int superio_inw(int base, int reg);
-static inline void superio_enter(int base);
+static inline int superio_enter(int base);
 static inline void superio_select(int base, int ld);
 static inline void superio_exit(int base);
 
@@ -861,11 +861,20 @@ static int superio_inw(int base, int reg)
        return val;
 }
 
-static inline void superio_enter(int base)
+static inline int superio_enter(int base)
 {
+       /* Don't step on other drivers' I/O space by accident */
+       if (!request_muxed_region(base, 2, DRVNAME)) {
+               printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+                               base);
+               return -EBUSY;
+       }
+
        /* according to the datasheet the key must be send twice! */
        outb(SIO_UNLOCK_KEY, base);
        outb(SIO_UNLOCK_KEY, base);
+
+       return 0;
 }
 
 static inline void superio_select(int base, int ld)
@@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld)
 static inline void superio_exit(int base)
 {
        outb(SIO_LOCK_KEY, base);
+       release_region(base, 2);
 }
 
 static inline int fan_from_reg(u16 reg)
@@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev)
 static int __init f71882fg_find(int sioaddr, unsigned short *address,
        struct f71882fg_sio_data *sio_data)
 {
-       int err = -ENODEV;
        u16 devid;
-
-       /* Don't step on other drivers' I/O space by accident */
-       if (!request_region(sioaddr, 2, DRVNAME)) {
-               printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
-                               (int)sioaddr);
-               return -EBUSY;
-       }
-
-       superio_enter(sioaddr);
+       int err = superio_enter(sioaddr);
+       if (err)
+               return err;
 
        devid = superio_inw(sioaddr, SIO_REG_MANID);
        if (devid != SIO_FINTEK_ID) {
                pr_debug(DRVNAME ": Not a Fintek device\n");
+               err = -ENODEV;
                goto exit;
        }
 
@@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
        default:
                printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
                       (unsigned int)devid);
+               err = -ENODEV;
                goto exit;
        }
 
@@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
 
        if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
                printk(KERN_WARNING DRVNAME ": Device not activated\n");
+               err = -ENODEV;
                goto exit;
        }
 
        *address = superio_inw(sioaddr, SIO_REG_ADDR);
        if (*address == 0) {
                printk(KERN_WARNING DRVNAME ": Base address not set\n");
+               err = -ENODEV;
                goto exit;
        }
        *address &= ~(REGION_LENGTH - 1);       /* Ignore 3 LSB */
@@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
                (int)superio_inb(sioaddr, SIO_REG_DEVREV));
 exit:
        superio_exit(sioaddr);
-       release_region(sioaddr, 2);
        return err;
 }
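
Note: with this change superio_enter() claims the two-port Super-I/O window via request_muxed_region() and superio_exit() releases it, so each caller owns the ports for exactly the enter/exit span instead of wrapping the whole find routine in request_region(). A kernel-style sketch of the pairing, reusing the driver's key macros but otherwise illustrative only:

/* illustrative only: the region is held for exactly the enter()/exit() span */
static int sio_enter(int base)
{
    if (!request_muxed_region(base, 2, "demo-sio"))
        return -EBUSY;              /* another driver owns the ports right now */

    outb(SIO_UNLOCK_KEY, base);     /* the key must be sent twice per datasheet */
    outb(SIO_UNLOCK_KEY, base);
    return 0;
}

static void sio_exit(int base)
{
    outb(SIO_LOCK_KEY, base);
    release_region(base, 2);        /* hand the window back */
}
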
 
index 6138f036b159956dbc4eec8282636db794485527..fc591ae53107da8481a2ab5e02f53117ac6e2471 100644 (file)
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
        wake_up_interruptible(&lis3_dev.misc_wait);
        kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
 out:
-       if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+       if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
            lis3_dev.idev->input->users)
                return IRQ_WAKE_THREAD;
        return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
         * io-apic is not configurable (and generates a warning) but I keep it
         * in case of support for other hardware.
         */
-       if (dev->whoami == WAI_8B)
+       if (dev->pdata && dev->whoami == WAI_8B)
                thread_fn = lis302dl_interrupt_thread1_8b;
        else
                thread_fn = NULL;
index 74157fcda6edf4bc569469db4ae1de9efa5b882d..f11903936c8b3a51c3f8dddcb20318185952fa79 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/cpu.h>
-#include <linux/pci.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
 
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
 
        err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
        if (err)
-               goto exit_free;
+               goto exit_dev;
 
        data->hwmon_dev = hwmon_device_register(&pdev->dev);
        if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
 
 exit_class:
        sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+exit_dev:
+       device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
 exit_free:
        kfree(data);
 exit:
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev)
 
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+       device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
        platform_set_drvdata(pdev, NULL);
        kfree(data);
        return 0;
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
        int err;
        struct platform_device *pdev;
        struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
        struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+       if (!cpu_has(c, X86_FEATURE_PTS))
+               return 0;
 
        mutex_lock(&pdev_list_mutex);
 
@@ -339,17 +342,18 @@ exit:
 #ifdef CONFIG_HOTPLUG_CPU
 static void pkgtemp_device_remove(unsigned int cpu)
 {
-       struct pdev_entry *p, *n;
+       struct pdev_entry *p;
        unsigned int i;
        int err;
 
        mutex_lock(&pdev_list_mutex);
-       list_for_each_entry_safe(p, n, &pdev_list, list) {
+       list_for_each_entry(p, &pdev_list, list) {
                if (p->cpu != cpu)
                        continue;
 
                platform_device_unregister(p->pdev);
                list_del(&p->list);
+               mutex_unlock(&pdev_list_mutex);
                kfree(p);
                for_each_cpu(i, cpu_core_mask(cpu)) {
                        if (i != cpu) {
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu)
                                        break;
                        }
                }
-               break;
+               return;
        }
        mutex_unlock(&pdev_list_mutex);
 }
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void)
                goto exit;
 
        for_each_online_cpu(i) {
-               struct cpuinfo_x86 *c = &cpu_data(i);
-
-               if (!cpu_has(c, X86_FEATURE_PTS))
-                       continue;
-
                err = pkgtemp_device_add(i);
                if (err)
                        goto exit_devices_unreg;
index 2222c87876b97bc711b6d739fd4a82deef7db330..b8feac5f2ef4f6f4be4d410ec7a5a2ce2a6d4946 100644 (file)
@@ -357,9 +357,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
 
        dev->terminate = 0;
 
-       /* write the data into mode register */
-       davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
-
        /*
         * First byte should be set here, not after interrupt,
         * because transmit-data-ready interrupt can come before
@@ -371,6 +368,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
                dev->buf_len--;
        }
 
+       /* write the data into mode register; start transmitting */
+       davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
+
        r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
                                                      dev->adapter.timeout);
        if (r == 0) {
index 0e9f85d0a835718dac97ecd52ff270f327d84136..56dbe54e88118a3fb7b112da16e11ccd5bdbc9fb 100644 (file)
@@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
                return result;
        } else if (result == 0) {
                dev_dbg(i2c->dev, "%s: timeout\n", __func__);
-               result = -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
        return 0;
index 72902e0bbfa79a48caaf2193420d4b8712af1e18..bf831bf8158741a9f857eb541afc1f3a48d38e52 100644 (file)
@@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
                unsigned long sda_delay;
 
                if (pdata->sda_delay) {
-                       sda_delay = (freq / 1000) * pdata->sda_delay;
-                       sda_delay /= 1000000;
+                       sda_delay = clkin * pdata->sda_delay;
+                       sda_delay = DIV_ROUND_UP(sda_delay, 1000000);
                        sda_delay = DIV_ROUND_UP(sda_delay, 5);
                        if (sda_delay > 3)
                                sda_delay = 3;
old mode 100755 (executable)
new mode 100644 (file)
index a10152b..0906fc5
@@ -83,7 +83,7 @@ static unsigned int mwait_substates;
 /* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
 static unsigned int lapic_timer_reliable_states;
 
-static struct cpuidle_device *intel_idle_cpuidle_devices;
+static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 
 static struct cpuidle_state *cpuidle_state_table;
@@ -108,7 +108,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "NHM-C3",
                .desc = "MWAIT 0x10",
                .driver_data = (void *) 0x10,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 20,
                .power_usage = 500,
                .target_residency = 80,
@@ -117,7 +117,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "NHM-C6",
                .desc = "MWAIT 0x20",
                .driver_data = (void *) 0x20,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 200,
                .power_usage = 350,
                .target_residency = 800,
@@ -149,7 +149,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "ATM-C4",
                .desc = "MWAIT 0x30",
                .driver_data = (void *) 0x30,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 100,
                .power_usage = 250,
                .target_residency = 400,
@@ -159,7 +159,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "ATM-C6",
                .desc = "MWAIT 0x40",
                .driver_data = (void *) 0x40,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 200,
                .power_usage = 150,
                .target_residency = 800,
@@ -185,6 +185,16 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
 
        local_irq_disable();
 
+       /*
+        * If the state flag indicates that the TLB will be flushed or if this
+        * is the deepest c-state supported, do a voluntary leave mm to avoid
+        * costly and mostly unnecessary wakeups for flushing the user TLB's
+        * associated with the active mm.
+        */
+       if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
+           (&dev->states[dev->state_count - 1] == state))
+               leave_mm(cpu);
+
        if (!(lapic_timer_reliable_states & (1 << (cstate))))
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
index d88077a219944ec49e2f594de780d0f216f9201f..13c88871dc3b90f564a52b4651aa371a4cd15633 100644 (file)
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
-       opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+       opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+              V_CONG_CONTROL_FLAVOR(cong_flavor);
        skb->priority = CPL_PRIORITY_SETUP;
        set_arp_failure_handler(skb, act_open_req_arp_failure);
 
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
-       opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+       opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+              V_CONG_CONTROL_FLAVOR(cong_flavor);
 
        rpl = cplhdr(skb);
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
index 74dce4ba0262560977ae88d69fbb2fbe7db62009..350eb34f049c97520bb183d97f9f84f8864663b5 100644 (file)
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
        int cmd_level;
        int slow_level;
 
-       read_lock(&led_dat->rw_lock);
+       read_lock_irq(&led_dat->rw_lock);
 
        cmd_level = gpio_get_value(led_dat->cmd);
        slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
                }
        }
 
-       read_unlock(&led_dat->rw_lock);
+       read_unlock_irq(&led_dat->rw_lock);
 
        return ret;
 }
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
                             enum ns2_led_modes mode)
 {
        int i;
+       unsigned long flags;
 
-       write_lock(&led_dat->rw_lock);
+       write_lock_irqsave(&led_dat->rw_lock, flags);
 
        for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
                if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
                }
        }
 
-       write_unlock(&led_dat->rw_lock);
+       write_unlock_irqrestore(&led_dat->rw_lock, flags);
 }
 
 static void ns2_led_set(struct led_classdev *led_cdev,
index 04028a9ee082735278557b606840619ade7e29a9..428377a5a6f56fe94ca033730a49102f19164dd2 100644 (file)
@@ -429,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq)
        irq_tsc = cache_tsc;
        for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
                irq_data = &max8925_irqs[i];
+               /* 1 -- disable, 0 -- enable */
                switch (irq_data->mask_reg) {
                case MAX8925_CHG_IRQ1_MASK:
-                       irq_chg[0] &= irq_data->enable;
+                       irq_chg[0] &= ~irq_data->enable;
                        break;
                case MAX8925_CHG_IRQ2_MASK:
-                       irq_chg[1] &= irq_data->enable;
+                       irq_chg[1] &= ~irq_data->enable;
                        break;
                case MAX8925_ON_OFF_IRQ1_MASK:
-                       irq_on[0] &= irq_data->enable;
+                       irq_on[0] &= ~irq_data->enable;
                        break;
                case MAX8925_ON_OFF_IRQ2_MASK:
-                       irq_on[1] &= irq_data->enable;
+                       irq_on[1] &= ~irq_data->enable;
                        break;
                case MAX8925_RTC_IRQ_MASK:
-                       irq_rtc &= irq_data->enable;
+                       irq_rtc &= ~irq_data->enable;
                        break;
                case MAX8925_TSC_IRQ_MASK:
-                       irq_tsc &= irq_data->enable;
+                       irq_tsc &= ~irq_data->enable;
                        break;
                default:
                        dev_err(chip->dev, "wrong IRQ\n");
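
Note: the max8925 fix flips each mask update from "&= irq_data->enable" to "&= ~irq_data->enable". Since 1 means disabled in these registers, clearing the bit is what unmasks the interrupt; the old AND kept the target source masked while unmasking everything else. A tiny standalone illustration of the polarity:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint8_t mask = 0xff;                  /* 1 = source disabled, 0 = enabled */
    uint8_t enable_bit = 1u << 3;         /* the interrupt we want to allow */

    mask &= ~enable_bit;                  /* correct: clear the bit to enable it */
    assert((mask & enable_bit) == 0);
    assert((mask | enable_bit) == 0xff);  /* every other source stays masked */

    /* the old form, mask &= enable_bit, would have left this source masked
     * and unmasked all the others instead */
    return 0;
}
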
index 7dabe4dbd3732e1d75c396b9b1e01bdeafafa57c..294183b6260b1facff3d26764eb3cea8c6d4b011 100644 (file)
@@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
 
        irq = irq - wm831x->irq_base;
 
-       if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11)
-               return -EINVAL;
+       if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
+               /* Ignore internal-only IRQs */
+               if (irq >= 0 && irq < WM831X_NUM_IRQS)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
 
        switch (type) {
        case IRQ_TYPE_EDGE_BOTH:
index 0b591b658243a7675ac66d24b97e7ea1ceaa9781..b74331260744db8829a2a8712a0d08bf5d42f949 100644 (file)
@@ -368,7 +368,7 @@ config VMWARE_BALLOON
          If unsure, say N.
 
          To compile this driver as a module, choose M here: the
-         module will be called vmware_balloon.
+         module will be called vmw_balloon.
 
 config ARM_CHARLCD
        bool "ARM Ltd. Character LCD Driver"
index 255a80dc9d73267a115da07c5729e04aa1c8a8ee..42eab95cde2af49e8ae737036695a1093caa01d9 100644 (file)
@@ -33,5 +33,5 @@ obj-$(CONFIG_IWMC3200TOP)      += iwmc3200top/
 obj-$(CONFIG_HMC6352)          += hmc6352.o
 obj-y                          += eeprom/
 obj-y                          += cb710/
-obj-$(CONFIG_VMWARE_BALLOON)   += vmware_balloon.o
+obj-$(CONFIG_VMWARE_BALLOON)   += vmw_balloon.o
 obj-$(CONFIG_ARM_CHARLCD)      += arm-charlcd.o
index 71ad4163b95e12b45aab7d41379c8022995d8ac4..aacb862ecc8a979f8022b8e0dd3428ed26c15baa 100644 (file)
@@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = {
 static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
 {
        struct sdhci_host *host = platform_get_drvdata(dev);
+       unsigned long flags;
+
        if (host) {
-               spin_lock(&host->lock);
+               spin_lock_irqsave(&host->lock, flags);
                if (state) {
                        dev_dbg(&dev->dev, "card inserted.\n");
                        host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
                        host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
                }
                tasklet_schedule(&host->card_tasklet);
-               spin_unlock(&host->lock);
+               spin_unlock_irqrestore(&host->lock, flags);
        }
 }
 
@@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
        sdhci_remove_host(host, 1);
 
        for (ptr = 0; ptr < 3; ptr++) {
-               clk_disable(sc->clk_bus[ptr]);
-               clk_put(sc->clk_bus[ptr]);
+               if (sc->clk_bus[ptr]) {
+                       clk_disable(sc->clk_bus[ptr]);
+                       clk_put(sc->clk_bus[ptr]);
+               }
        }
        clk_disable(sc->clk_io);
        clk_put(sc->clk_io);
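
Note: the card-detect notifier may run with interrupts in either state, so the plain spin_lock()/spin_unlock() pair becomes spin_lock_irqsave()/spin_unlock_irqrestore(), which saves the caller's IRQ state in flags and puts it back afterwards. A kernel-style sketch of the idiom, illustrative only:

/* illustrative only: guard state shared with IRQ context while preserving
 * the caller's interrupt state instead of assuming it */
static DEFINE_SPINLOCK(demo_lock);
static int demo_card_present;

static void demo_notify_change(int present)
{
    unsigned long flags;

    spin_lock_irqsave(&demo_lock, flags);      /* also disables local IRQs */
    demo_card_present = present;
    spin_unlock_irqrestore(&demo_lock, flags); /* restores the saved state */
}
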
index 133d51528f8dc0fb79eae4d12230e1a65bd7595e..513e0a76a4a73866d52bba8151e43556a3b30a54 100644 (file)
@@ -413,7 +413,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
        } while (prefetch_status);
        /* disable and stop the PFPW engine */
-       gpmc_prefetch_reset();
+       gpmc_prefetch_reset(info->gpmc_cs);
 
        dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
        return 0;
index fa42103b287429e2736f83e38b275c6d44a9b36c..179871d9e71f9a1f111645f58c68b7fea55b0861 100644 (file)
@@ -2942,6 +2942,9 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct vortex_private *vp = netdev_priv(dev);
 
+       if (!VORTEX_PCI(vp))
+               return;
+
        wol->supported = WAKE_MAGIC;
 
        wol->wolopts = 0;
@@ -2952,6 +2955,10 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct vortex_private *vp = netdev_priv(dev);
+
+       if (!VORTEX_PCI(vp))
+               return -EOPNOTSUPP;
+
        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
 
@@ -3201,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev)
                        return;
                }
 
+               if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+                       return;
+
                /* Change the power state to D3; RxEnable doesn't take effect. */
                pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
        }
index 63b9ba0cc67e13c408686442f1e2fe984ee9b462..c73be2848319deecd38a3d6f5e8e45ed8637e6f9 100644 (file)
@@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
 
        rrd_ring->desc = NULL;
        rrd_ring->dma = 0;
+
+       adapter->cmb.dma = 0;
+       adapter->cmb.cmb = NULL;
+
+       adapter->smb.dma = 0;
+       adapter->smb.smb = NULL;
 }
 
 static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev)
        pci_enable_wake(pdev, PCI_D3cold, 0);
 
        atl1_reset_hw(&adapter->hw);
-       adapter->cmb.cmb->int_stats = 0;
 
-       if (netif_running(netdev))
+       if (netif_running(netdev)) {
+               adapter->cmb.cmb->int_stats = 0;
                atl1_up(adapter);
+       }
        netif_device_attach(netdev);
 
        return 0;
index 66ed08f726fb9bcdd13225086d05f3dcf437da6a..ba302a5c2c30e14c2be16333f38adb2a2668a89c 100644 (file)
@@ -57,6 +57,7 @@ enum e1e_registers {
        E1000_SCTL     = 0x00024, /* SerDes Control - RW */
        E1000_FCAL     = 0x00028, /* Flow Control Address Low - RW */
        E1000_FCAH     = 0x0002C, /* Flow Control Address High -RW */
+       E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
        E1000_FEXTNVM  = 0x00028, /* Future Extended NVM - RW */
        E1000_FCT      = 0x00030, /* Flow Control Type - RW */
        E1000_VET      = 0x00038, /* VLAN Ether Type - RW */
index 63930d12711cf44ceb3334e5334fef5089ecb2e2..57b5435599ab1d61c58e393d04e0a2f3912d66fc 100644 (file)
 #define E1000_FEXTNVM_SW_CONFIG                1
 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
 
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK    0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
+
 #define PCIE_ICH8_SNOOP_ALL            PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES          7
 
 /* SMBus Address Phy Register */
 #define HV_SMB_ADDR            PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK       0x007F
 #define HV_SMB_ADDR_PEC_EN     0x0200
 #define HV_SMB_ADDR_VALID      0x0080
 
@@ -237,6 +242,8 @@ static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 {
        struct e1000_phy_info *phy = &hw->phy;
-       u32 ctrl;
+       u32 ctrl, fwsm;
        s32 ret_val = 0;
 
        phy->addr                     = 1;
@@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
         * disabled, then toggle the LANPHYPC Value bit to force
         * the interconnect to PCIe mode.
         */
-       if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+       fwsm = er32(FWSM);
+       if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
                ctrl = er32(CTRL);
                ctrl |=  E1000_CTRL_LANPHYPC_OVERRIDE;
                ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
                ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
                ew32(CTRL, ctrl);
                msleep(50);
+
+               /*
+                * Gate automatic PHY configuration by hardware on
+                * non-managed 82579
+                */
+               if (hw->mac.type == e1000_pch2lan)
+                       e1000_gate_hw_phy_config_ich8lan(hw, true);
        }
 
        /*
@@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
        if (ret_val)
                goto out;
 
+       /* Ungate automatic PHY configuration on non-managed 82579 */
+       if ((hw->mac.type == e1000_pch2lan)  &&
+           !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+               msleep(10);
+               e1000_gate_hw_phy_config_ich8lan(hw, false);
+       }
+
        phy->id = e1000_phy_unknown;
        ret_val = e1000e_get_phy_id(hw);
        if (ret_val)
@@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
        if (mac->type == e1000_ich8lan)
                e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 
-       /* Disable PHY configuration by hardware, config by software */
-       if (mac->type == e1000_pch2lan) {
-               u32 extcnf_ctrl = er32(EXTCNF_CTRL);
-
-               extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
-               ew32(EXTCNF_CTRL, extcnf_ctrl);
-       }
+       /* Gate automatic PHY configuration by hardware on managed 82579 */
+       if ((mac->type == e1000_pch2lan) &&
+           (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+               e1000_gate_hw_phy_config_ich8lan(hw, true);
 
        return 0;
 }
@@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        goto out;
        }
 
+       if (hw->mac.type == e1000_pch2lan) {
+               ret_val = e1000_k1_workaround_lv(hw);
+               if (ret_val)
+                       goto out;
+       }
+
        /*
         * Check if there was DownShift, must be checked
         * immediately after link-up
@@ -894,6 +919,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
        return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
 }
 
+/**
+ *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ *  @hw: pointer to the HW structure
+ *
+ *  Assumes semaphore already acquired.
+ *
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+       u16 phy_data;
+       u32 strap = er32(STRAP);
+       s32 ret_val = 0;
+
+       strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+       ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data &= ~HV_SMB_ADDR_MASK;
+       phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+       phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+       ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+
+out:
+       return ret_val;
+}
+
 /**
  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
  *  @hw:   pointer to the HW structure
@@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
  **/
 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
 {
-       struct e1000_adapter *adapter = hw->adapter;
        struct e1000_phy_info *phy = &hw->phy;
        u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
        s32 ret_val = 0;
@@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
                if (phy->type != e1000_phy_igp_3)
                        return ret_val;
 
-               if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
+               if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
+                   (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
                        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
                        break;
                }
@@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
        cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
        cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
 
-       if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
-           ((hw->mac.type == e1000_pchlan) ||
-            (hw->mac.type == e1000_pch2lan))) {
+       if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+           (hw->mac.type == e1000_pchlan)) ||
+            (hw->mac.type == e1000_pch2lan)) {
                /*
                 * HW configures the SMBus address and LEDs when the
                 * OEM and LCD Write Enable bits are set in the NVM.
                 * When both NVM bits are cleared, SW will configure
                 * them instead.
                 */
-               data = er32(STRAP);
-               data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
-               reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
-               reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
-               ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
-                                                       reg_data);
+               ret_val = e1000_write_smbus_addr(hw);
                if (ret_val)
                        goto out;
 
@@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        goto out;
 
                /* Enable jumbo frame workaround in the PHY */
-               e1e_rphy(hw, PHY_REG(769, 20), &data);
-               ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
-               if (ret_val)
-                       goto out;
                e1e_rphy(hw, PHY_REG(769, 23), &data);
                data &= ~(0x7F << 5);
                data |= (0x37 << 5);
@@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        goto out;
                e1e_rphy(hw, PHY_REG(769, 16), &data);
                data &= ~(1 << 13);
-               data |= (1 << 12);
                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
                if (ret_val)
                        goto out;
@@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 
                mac_reg = er32(RCTL);
                mac_reg &= ~E1000_RCTL_SECRC;
-               ew32(FFLT_DBG, mac_reg);
+               ew32(RCTL, mac_reg);
 
                ret_val = e1000e_read_kmrn_reg(hw,
                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
@@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        goto out;
 
                /* Write PHY register values back to h/w defaults */
-               e1e_rphy(hw, PHY_REG(769, 20), &data);
-               ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
-               if (ret_val)
-                       goto out;
                e1e_rphy(hw, PHY_REG(769, 23), &data);
                data &= ~(0x7F << 5);
                ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
                if (ret_val)
                        goto out;
                e1e_rphy(hw, PHY_REG(769, 16), &data);
-               data &= ~(1 << 12);
                data |= (1 << 13);
                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
                if (ret_val)
@@ -1558,6 +1596,69 @@ out:
        return ret_val;
 }
 
+/**
+ *  e1000_k1_gig_workaround_lv - K1 Si workaround
+ *  @hw:   pointer to the HW structure
+ *
+ *  Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+       s32 ret_val = 0;
+       u16 status_reg = 0;
+       u32 mac_reg;
+
+       if (hw->mac.type != e1000_pch2lan)
+               goto out;
+
+       /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+       ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+       if (ret_val)
+               goto out;
+
+       if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+           == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+               mac_reg = er32(FEXTNVM4);
+               mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+               if (status_reg & HV_M_STATUS_SPEED_1000)
+                       mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+               else
+                       mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+
+               ew32(FEXTNVM4, mac_reg);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ *  @hw:   pointer to the HW structure
+ *  @gate: boolean set to true to gate, false to ungate
+ *
+ *  Gate/ungate the automatic PHY configuration via hardware; perform
+ *  the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+       u32 extcnf_ctrl;
+
+       if (hw->mac.type != e1000_pch2lan)
+               return;
+
+       extcnf_ctrl = er32(EXTCNF_CTRL);
+
+       if (gate)
+               extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+       else
+               extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+       ew32(EXTCNF_CTRL, extcnf_ctrl);
+       return;
+}
+
 /**
  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
  *  @hw: pointer to the HW structure
@@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
        if (e1000_check_reset_block(hw))
                goto out;
 
+       /* Allow time for h/w to get to quiescent state after reset */
+       msleep(10);
+
        /* Perform any necessary post-reset workarounds */
        switch (hw->mac.type) {
        case e1000_pchlan:
@@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
        /* Configure the LCD with the OEM bits in NVM */
        ret_val = e1000_oem_bits_config_ich8lan(hw, true);
 
+       /* Ungate automatic PHY configuration on non-managed 82579 */
+       if ((hw->mac.type == e1000_pch2lan) &&
+           !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+               msleep(10);
+               e1000_gate_hw_phy_config_ich8lan(hw, false);
+       }
+
 out:
        return ret_val;
 }
@@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
 
+       /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+       if ((hw->mac.type == e1000_pch2lan) &&
+           !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+               e1000_gate_hw_phy_config_ich8lan(hw, true);
+
        ret_val = e1000e_phy_hw_reset_generic(hw);
        if (ret_val)
                goto out;
@@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
                 * external PHY is reset.
                 */
                ctrl |= E1000_CTRL_PHY_RST;
+
+               /*
+                * Gate automatic PHY configuration by hardware on
+                * non-managed 82579
+                */
+               if ((hw->mac.type == e1000_pch2lan) &&
+                   !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+                       e1000_gate_hw_phy_config_ich8lan(hw, true);
        }
        ret_val = e1000_acquire_swflag_ich8lan(hw);
        e_dbg("Issuing a global reset to ich8lan\n");
@@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
 void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
 {
        u32 phy_ctrl;
+       s32 ret_val;
 
        phy_ctrl = er32(PHY_CTRL);
        phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
        ew32(PHY_CTRL, phy_ctrl);
 
-       if (hw->mac.type >= e1000_pchlan)
-               e1000_phy_hw_reset_ich8lan(hw);
+       if (hw->mac.type >= e1000_pchlan) {
+               e1000_oem_bits_config_ich8lan(hw, true);
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       return;
+               e1000_write_smbus_addr(hw);
+               hw->phy.ops.release(hw);
+       }
 }
 
 /**
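
Note: the new e1000_gate_hw_phy_config_ich8lan() helper is a single read-modify-write of one EXTCNF_CTRL bit, applied only on 82579 (pch2lan) parts. Stripped of the register accessors, the same shape over a plain variable looks like this (the bit position is chosen arbitrarily for the sketch):

#include <stdint.h>

#define GATE_PHY_CFG (1u << 7)   /* arbitrary bit for the sketch, not the real layout */

static uint32_t extcnf_ctrl;     /* stands in for the EXTCNF_CTRL register */

static void gate_phy_config(int gate)
{
    uint32_t v = extcnf_ctrl;    /* read */
    if (gate)
        v |= GATE_PHY_CFG;       /* modify: block HW auto-configuration */
    else
        v &= ~GATE_PHY_CFG;      /* modify: let HW configure the PHY again */
    extcnf_ctrl = v;             /* write */
}
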
index 2b8ef44bd2b1629c5d203206479a3b8bab510fab..e561d15c3eb161558f9a7da3825f3cc6c90840c7 100644 (file)
@@ -2704,6 +2704,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
        u32 psrctl = 0;
        u32 pages = 0;
 
+       /* Workaround Si errata on 82579 - configure jumbo frame flow */
+       if (hw->mac.type == e1000_pch2lan) {
+               s32 ret_val;
+
+               if (adapter->netdev->mtu > ETH_DATA_LEN)
+                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+               else
+                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+       }
+
        /* Program MC offset vector base */
        rctl = er32(RCTL);
        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
@@ -2744,16 +2754,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
                e1e_wphy(hw, 22, phy_data);
        }
 
-       /* Workaround Si errata on 82579 - configure jumbo frame flow */
-       if (hw->mac.type == e1000_pch2lan) {
-               s32 ret_val;
-
-               if (rctl & E1000_RCTL_LPE)
-                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
-               else
-                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
-       }
-
        /* Setup buffer sizes */
        rctl &= ~E1000_RCTL_SZ_4096;
        rctl |= E1000_RCTL_BSEX;
@@ -4833,6 +4833,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                return -EINVAL;
        }
 
+       /* Jumbo frame workaround on 82579 requires CRC be stripped */
+       if ((adapter->hw.mac.type == e1000_pch2lan) &&
+           !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
+           (new_mtu > ETH_DATA_LEN)) {
+               e_err("Jumbo Frames not supported on 82579 when CRC "
+                     "stripping is disabled.\n");
+               return -EINVAL;
+       }
+
        /* 82573 Errata 17 */
        if (((adapter->hw.mac.type == e1000_82573) ||
             (adapter->hw.mac.type == e1000_82574)) &&
index 3506fd6ad7263be467b9128938b248bbaf3dd054..519e19e23955a3c3a86d8e04f615b67daa7a996e 100644 (file)
@@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct platform_device *ofdev,
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);
  err_free:
-       kfree(ndev);
+       free_netdev(ndev);
  err_gone:
        /* if we were on the bootlist, remove us as we won't show up and
         * wake up all waiters to notify them in case they were waiting
@@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);
 
-       kfree(dev->ndev);
+       free_netdev(dev->ndev);
 
        return 0;
 }
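
Note: both the probe error path and emac_remove() now call free_netdev() instead of kfree(): a net_device allocated through alloc_etherdev() carries embedded state that only free_netdev() tears down correctly. A kernel-style sketch of the matching alloc/free pair, with a made-up private struct and no real bus glue:

/* illustrative only; struct demo_priv and the error values are made up */
struct demo_priv { int dummy; };

static int demo_probe(void)
{
    struct net_device *ndev = alloc_etherdev(sizeof(struct demo_priv));

    if (!ndev)
        return -ENOMEM;

    if (register_netdev(ndev)) {
        free_netdev(ndev);       /* undo alloc_etherdev(), never plain kfree() */
        return -ENODEV;
    }
    return 0;
}

static void demo_remove(struct net_device *ndev)
{
    unregister_netdev(ndev);
    free_netdev(ndev);           /* same rule on the teardown path */
}
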
index cabae7bb1fc6777d3366c5a8728feadcd53d0aa3..b075a35b85d4ef09cfab1b2df3b42be0313c2516 100644 (file)
@@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter,
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
 
        napi_gro_receive(&sds_ring->napi, skb);
@@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter,
 
        skb_put(skb, lro_length + data_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
        skb_pull(skb, l2_hdr_offset);
        skb->protocol = eth_type_trans(skb, netdev);
 
index 75ba744b173c89d0f598d1d61183fd5d9dca1675..2c7cf0b64811ed72d4d6368c02d46fa966e7de2f 100644 (file)
@@ -1316,7 +1316,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
                return -ENOMEM;
        }
 
-       skb_reserve(skb, 2);
+       skb_reserve(skb, NET_IP_ALIGN);
 
        dma = pci_map_single(pdev, skb->data,
                        rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@ -1404,7 +1404,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
 
        napi_gro_receive(&sds_ring->napi, skb);
@@ -1466,8 +1465,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
 
        skb_put(skb, lro_length + data_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
        skb_pull(skb, l2_hdr_offset);
        skb->protocol = eth_type_trans(skb, netdev);
 
@@ -1700,8 +1697,6 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff);
-
        if (!qlcnic_check_loopback_buff(skb->data))
                adapter->diag_cnt++;
 
index 07eb884ff982405c3d15204a04d6188394e329f3..44150f2f7bfd6b206b17e0a64064d5996d0033ea 100644 (file)
@@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev)
        free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
                                        __ilog2(sizeof(void *)) + 4 : 0);
        unregister_netdev(ndev);
-       kfree(ndev);
+       free_netdev(ndev);
 
        list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
                list_del(&peer->node);
index cc4bd8c65f8b6a58846065430f8b89c1be6a4bc3..9265315baa0b29bdc2c7a9e20db20462dcf52ae5 100644 (file)
@@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
 err_out_free_page:
        free_page((unsigned long) sp->srings);
 err_out_free_dev:
-       kfree(dev);
+       free_netdev(dev);
 
 err_out:
        return err;
index 0909ae934ad0fcee52c5eb67101240e77fb70dde..8150ba1541161ab2e7781ade13be970540f835ec 100644 (file)
@@ -58,6 +58,7 @@
 
 MODULE_LICENSE("GPL");
 MODULE_VERSION(SMSC_DRV_VERSION);
+MODULE_ALIAS("platform:smsc911x");
 
 #if USE_DEBUG > 0
 static int debug = 16;
index 5efa57757a2c8507f5bcf0d0d7f3ac5d1b5a26c3..6888e3d41462081952c7320b501736f03230ba78 100644 (file)
@@ -243,6 +243,7 @@ enum {
        NWayState               = (1 << 14) | (1 << 13) | (1 << 12),
        NWayRestart             = (1 << 12),
        NonselPortActive        = (1 << 9),
+       SelPortActive           = (1 << 8),
        LinkFailStatus          = (1 << 2),
        NetCxnErr               = (1 << 1),
 };
@@ -363,7 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
 
 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
-static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
+static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
 
 
@@ -1064,6 +1067,9 @@ static void de21041_media_timer (unsigned long data)
        unsigned int carrier;
        unsigned long flags;
 
+       /* clear port active bits */
+       dw32(SIAStatus, NonselPortActive | SelPortActive);
+
        carrier = (status & NetCxnErr) ? 0 : 1;
 
        if (carrier) {
@@ -1158,14 +1164,29 @@ no_link_yet:
 static void de_media_interrupt (struct de_private *de, u32 status)
 {
        if (status & LinkPass) {
+               /* Ignore if current media is AUI or BNC and we can't use TP */
+               if ((de->media_type == DE_MEDIA_AUI ||
+                    de->media_type == DE_MEDIA_BNC) &&
+                   (de->media_lock ||
+                    !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
+                       return;
+               /* If current media is not TP, change it to TP */
+               if ((de->media_type == DE_MEDIA_AUI ||
+                    de->media_type == DE_MEDIA_BNC)) {
+                       de->media_type = DE_MEDIA_TP_AUTO;
+                       de_stop_rxtx(de);
+                       de_set_media(de);
+                       de_start_rxtx(de);
+               }
                de_link_up(de);
                mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
                return;
        }
 
        BUG_ON(!(status & LinkFail));
-
-       if (netif_carrier_ok(de->dev)) {
+       /* Mark the link as down only if current media is TP */
+       if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
+           de->media_type != DE_MEDIA_BNC) {
                de_link_down(de);
                mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
        }
@@ -1229,6 +1250,7 @@ static void de_adapter_sleep (struct de_private *de)
        if (de->de21040)
                return;
 
+       dw32(CSR13, 0); /* Reset phy */
        pci_read_config_dword(de->pdev, PCIPM, &pmctl);
        pmctl |= PM_Sleep;
        pci_write_config_dword(de->pdev, PCIPM, pmctl);
@@ -1574,12 +1596,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
                return 0; /* nothing to change */
 
        de_link_down(de);
+       mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
        de_stop_rxtx(de);
 
        de->media_type = new_media;
        de->media_lock = media_lock;
        de->media_advertise = ecmd->advertising;
        de_set_media(de);
+       if (netif_running(de->dev))
+               de_start_rxtx(de);
 
        return 0;
 }
@@ -1911,8 +1936,14 @@ fill_defaults:
        for (i = 0; i < DE_MAX_MEDIA; i++) {
                if (de->media[i].csr13 == 0xffff)
                        de->media[i].csr13 = t21041_csr13[i];
-               if (de->media[i].csr14 == 0xffff)
-                       de->media[i].csr14 = t21041_csr14[i];
+               if (de->media[i].csr14 == 0xffff) {
+                       /* autonegotiation is broken at least on some chip
+                          revisions - rev. 0x21 works, 0x11 does not */
+                       if (de->pdev->revision < 0x20)
+                               de->media[i].csr14 = t21041_csr14_brk[i];
+                       else
+                               de->media[i].csr14 = t21041_csr14[i];
+               }
                if (de->media[i].csr15 == 0xffff)
                        de->media[i].csr15 = t21041_csr15[i];
        }
@@ -2158,6 +2189,8 @@ static int de_resume (struct pci_dev *pdev)
                dev_err(&dev->dev, "pci_enable_device failed in resume\n");
                goto out;
        }
+       pci_set_master(pdev);
+       de_init_rings(de);
        de_init_hw(de);
 out_attach:
        netif_device_attach(dev);
index 9dd9e64c2b0b1a69f11a6d40a5e0e2cbc84d8b22..8fd00a6e512019075e966a038d2c5d09539ccf85 100644 (file)
@@ -1411,7 +1411,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        clear_bit(STATUS_SCAN_HW, &priv->status);
        clear_bit(STATUS_SCANNING, &priv->status);
        /* inform mac80211 scan aborted */
-       queue_work(priv->workqueue, &priv->scan_completed);
+       queue_work(priv->workqueue, &priv->abort_scan);
 }
 
 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
index 07dbc27964480eebedb67760029dcadab9584f84..e23c4060a0f093e966ca355af1d466880f65b0ee 100644 (file)
@@ -2613,6 +2613,11 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return -EINVAL;
 
+       if (test_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_INFO(priv, "scan in progress.\n");
+               return -EINVAL;
+       }
+
        if (mode >= IWL_MAX_FORCE_RESET) {
                IWL_DEBUG_INFO(priv, "invalid reset request.\n");
                return -EINVAL;
index 59a308b02f95fdc077a84a7d12d71db7451b1816..d31661c1ce778259996b5428f9cff95b87f1a3db 100644 (file)
@@ -3018,7 +3018,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        clear_bit(STATUS_SCANNING, &priv->status);
 
        /* inform mac80211 scan aborted */
-       queue_work(priv->workqueue, &priv->scan_completed);
+       queue_work(priv->workqueue, &priv->abort_scan);
 }
 
 static void iwl3945_bg_restart(struct work_struct *data)
index b17235a24a4db285d5b5f65c5796f4e3b11ee019..79c0005134a18c540d0e74f2858a6e49525bcd80 100644 (file)
@@ -77,7 +77,7 @@ static int op_create_counter(int cpu, int event)
                return 0;
 
        pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
-                                                 cpu, -1,
+                                                 cpu, NULL,
                                                  op_overflow_handler);
 
        if (IS_ERR(pevent))
index c3ceebb5be84168ae71b845bdc52b3c9a6b557b3..4789f8e8bf7ad91cae0b0a1043349ac96ea5fa64 100644 (file)
 #define DMA_32BIT_PFN          IOVA_PFN(DMA_BIT_MASK(32))
 #define DMA_64BIT_PFN          IOVA_PFN(DMA_BIT_MASK(64))
 
+/* page table handling */
+#define LEVEL_STRIDE           (9)
+#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+       return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+       return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+       return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+       return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+       return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+       return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+       return (pfn + level_size(level) - 1) & level_mask(level);
+}
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
    are never going to work. */
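The helpers moved above encode the VT-d address-width arithmetic: each page-table level resolves LEVEL_STRIDE (9) bits, so an adjusted guest address width (agaw) of n corresponds to n + 2 levels covering 30 + 9n bits. A standalone user-space sketch of that arithmetic — only the formulas are taken from the patch, the main() harness is illustrative:

#include <stdio.h>

#define LEVEL_STRIDE    9

static int agaw_to_level(int agaw) { return agaw + 2; }
static int agaw_to_width(int agaw) { return 30 + agaw * LEVEL_STRIDE; }

int main(void)
{
        int agaw;

        /* e.g. agaw=1 -> 3 levels / 39 bits, agaw=2 -> 4 levels / 48 bits */
        for (agaw = 1; agaw <= 4; agaw++)
                printf("agaw=%d: %d-level table, %d-bit address width\n",
                       agaw, agaw_to_level(agaw), agaw_to_width(agaw));
        return 0;
}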
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
 }
 
 
-static inline int width_to_agaw(int width);
-
 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
 {
        unsigned long sagaw;
@@ -646,51 +687,6 @@ out:
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-/* page table handling */
-#define LEVEL_STRIDE           (9)
-#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
-       return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
-       return 30 + agaw * LEVEL_STRIDE;
-
-}
-
-static inline int width_to_agaw(int width)
-{
-       return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
-       return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
-       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
-       return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
-       return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
-       return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn)
 {
@@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
 
+#define GGC 0x52
+#define GGC_MEMORY_SIZE_MASK   (0xf << 8)
+#define GGC_MEMORY_SIZE_NONE   (0x0 << 8)
+#define GGC_MEMORY_SIZE_1M     (0x1 << 8)
+#define GGC_MEMORY_SIZE_2M     (0x3 << 8)
+#define GGC_MEMORY_VT_ENABLED  (0x8 << 8)
+#define GGC_MEMORY_SIZE_2M_VT  (0x9 << 8)
+#define GGC_MEMORY_SIZE_3M_VT  (0xa << 8)
+#define GGC_MEMORY_SIZE_4M_VT  (0xb << 8)
+
+static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+{
+       unsigned short ggc;
+
+       if (pci_read_config_word(dev, GGC, &ggc))
+               return;
+
+       if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
+               printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+               dmar_map_gfx = 0;
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+
 /* On Tylersburg chipsets, some BIOSes have been known to enable the
    ISOCH DMAR unit for the Azalia sound device, but not give it any
    TLB entries, which causes it to deadlock. Check for that.  We do
index ce6a3666b3d9878f70be6fb65f3e1272fdf6b6b2..553d8ee55c1c4aa3c9f6567867642d3505cf89df 100644 (file)
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
  * the VF BAR size multiplied by the number of VFs.  The alignment
  * is just the VF BAR size.
  */
-int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
 {
        struct resource tmp;
        enum pci_bar_type type;
index 7754a678ab15cc77445d396aa59409658bf8006f..6beb11b617a92973f7a32cc47a7343a759218d6e 100644 (file)
@@ -264,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
 extern void pci_iov_release(struct pci_dev *dev);
 extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
                                enum pci_bar_type *type);
-extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+                                                   int resno);
 extern void pci_restore_iov_state(struct pci_dev *dev);
 extern int pci_iov_bus_range(struct pci_bus *bus);
 
@@ -320,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
 }
 #endif /* CONFIG_PCI_IOV */
 
-static inline int pci_resource_alignment(struct pci_dev *dev,
+static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
                                         struct resource *res)
 {
 #ifdef CONFIG_PCI_IOV
index 89ed181cd90cd9cd68506a212eb423ba60f9c8e2..857ae01734a66156c8abb92335be91cc674964a0 100644 (file)
@@ -162,6 +162,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1,       quirk_isa_d
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,     PCI_DEVICE_ID_NEC_CBUS_2,       quirk_isa_dma_hangs);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,     PCI_DEVICE_ID_NEC_CBUS_3,       quirk_isa_dma_hangs);
 
+/*
+ * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
+ * for some HT machines to use C4 w/o hanging.
+ */
+static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
+{
+       u32 pmbase;
+       u16 pm1a;
+
+       pci_read_config_dword(dev, 0x40, &pmbase);
+       pmbase = pmbase & 0xff80;
+       pm1a = inw(pmbase);
+
+       if (pm1a & 0x10) {
+               dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
+               outw(0x10, pmbase);
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
+
 /*
  *     Chipsets where PCI->PCI transfers vanish or hang
  */
index a5c176598d9543d30e084a6e36daa72fbd65d80a..9ba4dade69a4d67cd5b0efc1e4c55a70dbf349c1 100644 (file)
@@ -595,7 +595,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
        if (c->io[1].end) {
                ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
                if (ret) {
+                       struct resource tmp = c->io[0];
+                       /* release the previously allocated resource */
                        release_io_space(s, &c->io[0]);
+                       /* but preserve the settings, for they worked... */
+                       c->io[0].end = resource_size(&tmp);
+                       c->io[0].start = tmp.start;
+                       c->io[0].flags = tmp.flags;
                        goto out;
                }
        } else
index b8a869af0f4410dd25a3cb0b4b5ace093ea5d6a5..deef6656ab7b8e2015e382ed48b85e067c7503a5 100644 (file)
@@ -646,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
        if (!pci_resource_start(dev, 0)) {
                dev_warn(&dev->dev, "refusing to load the driver as the "
                        "io_base is NULL.\n");
-               goto err_out_free_mem;
+               goto err_out_disable;
        }
 
        dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
index e35ed128bdef439313a107106f36c47e03dc5b7e..2d61186ad5a2e96708fcec4beb0a8402eb2bc09f 100644 (file)
@@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
        TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
 };
 
-typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+typedef u16 tpacpi_keymap_entry_t;
+typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
 
 static int __init hotkey_init(struct ibm_init_struct *iibm)
 {
@@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        };
 
 #define TPACPI_HOTKEY_MAP_SIZE         sizeof(tpacpi_keymap_t)
-#define TPACPI_HOTKEY_MAP_TYPESIZE     sizeof(tpacpi_keymap_t[0])
+#define TPACPI_HOTKEY_MAP_TYPESIZE     sizeof(tpacpi_keymap_entry_t)
 
        int res, i;
        int status;
index 422a709d271d51d593899db82b0b930cf93f2847..cc8b337b9119de5e955aabe1935ad931a895c71a 100644 (file)
@@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev)
            constraints->min_uA != constraints->max_uA) {
                ret = _regulator_get_current_limit(rdev);
                if (ret > 0)
-                       count += sprintf(buf + count, "at %d uA ", ret / 1000);
+                       count += sprintf(buf + count, "at %d mA ", ret / 1000);
        }
 
        if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
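Assuming _regulator_get_current_limit() reports the limit in microamps, as the regulator API does elsewhere, a limit of 150000 µA divided by 1000 prints as 150, so the corrected label reads "at 150 mA" where the old string would have claimed "at 150 uA".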
@@ -2302,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        dev_set_name(&rdev->dev, "regulator.%d",
                     atomic_inc_return(&regulator_no) - 1);
        ret = device_register(&rdev->dev);
-       if (ret != 0)
+       if (ret != 0) {
+               put_device(&rdev->dev);
                goto clean;
+       }
 
        dev_set_drvdata(&rdev->dev, rdev);
 
index 4520ace3f7e707f82ccbf0fb068df921dfc6c2df..6b60a9c0366b3c5236fa7019844274c8b1155b3e 100644 (file)
@@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
                /* set external clock frequency */
                info->extclk_freq = pdata->extclk_freq;
                max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
-                                info->extclk_freq);
+                                info->extclk_freq << 6);
        }
 
        if (pdata->ramp_timing) {
index d26780ea254b5d9e9e296309dc23f2640418a7f5..261a07e0fb24c0dd7cd1d05d3d60c35d184b1e38 100644 (file)
@@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
                err = PTR_ERR(rtc);
                return err;
        }
+       platform_set_drvdata(pdev, rtc);
 
        return 0;
 }
@@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev)
        struct rtc_device *rtc = platform_get_drvdata(pdev);
 
        rtc_device_unregister(rtc);
+       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index a0d3ec89d412ac57d683fa5a446720a375dfab22..f57a87f4ae96abb367a2e08d353378b31f2a19fa 100644 (file)
@@ -310,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        s3c_rtc_setaie(alrm->enabled);
 
-       if (alrm->enabled)
-               enable_irq_wake(s3c_rtc_alarmno);
-       else
-               disable_irq_wake(s3c_rtc_alarmno);
-
        return 0;
 }
 
@@ -587,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
                ticnt_en_save &= S3C64XX_RTCCON_TICEN;
        }
        s3c_rtc_enable(pdev, 0);
+
+       if (device_may_wakeup(&pdev->dev))
+               enable_irq_wake(s3c_rtc_alarmno);
+
        return 0;
 }
 
@@ -600,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
                tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
                writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
        }
+
+       if (device_may_wakeup(&pdev->dev))
+               disable_irq_wake(s3c_rtc_alarmno);
+
        return 0;
 }
 #else
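Moving the irq_wake handling into suspend/resume, gated by device_may_wakeup(), is the conventional shape for a wake-capable platform driver. A stripped-down sketch of that shape — the IRQ variable and function names are placeholders, not symbols from the s3c driver:

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int example_wake_irq;    /* placeholder for the alarm interrupt */

static int example_suspend(struct platform_device *pdev, pm_message_t state)
{
        /* Arm the IRQ as a wakeup source only when userspace allowed it. */
        if (device_may_wakeup(&pdev->dev))
                enable_irq_wake(example_wake_irq);
        return 0;
}

static int example_resume(struct platform_device *pdev)
{
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(example_wake_irq);
        return 0;
}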
index 6edf20b62de5bae28214275931f0db9fd1fcd1f2..2c7d2d9be4d0cd3099f63d2b2690242cc12eed50 100644 (file)
@@ -1154,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
                                dev_fsm, dev_fsm_len, GFP_KERNEL);
        if (priv->fsm == NULL) {
                CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
-               kfree(dev);
+               free_netdev(dev);
                return NULL;
        }
        fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
@@ -1165,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
                grp = ctcmpc_init_mpc_group(priv);
                if (grp == NULL) {
                        MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
-                       kfree(dev);
+                       free_netdev(dev);
                        return NULL;
                }
                tasklet_init(&grp->mpc_tasklet2,
index 324c385a653d3e0dc8240b0c28c2eab560ee9382..5dff45c76d32c5f026dc1ae408166a4f5e9ac6d2 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
+#include <linux/slab.h>
 #include <linux/serial_reg.h>
 #include <linux/circ_buf.h>
 #include <linux/delay.h>
index f6ad1ecbff79ec9688fbc024a2036f351efff26e..51c15f58e01ef84a8ce43ba8b62472f0e4d7bec0 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <linux/module.h>
 #include <linux/ioport.h>
+#include <linux/irq.h>
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
index 0bcf4c1601a23a8225c2c4395c2309e191439f5d..b5a78a1f4421a0c19aa8735bd49f076fd8f91b46 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 #include <linux/mutex.h>
+#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/mod_devicetable.h>
 #include <linux/spi/spi.h>
@@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);
 
+       /* Attempt an OF style match */
+       if (of_driver_match_device(dev, drv))
+               return 1;
+
        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);
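With the of_driver_match_device() call added above, an SPI device instantiated from the device tree can bind to a driver through its compatible string alone. A rough sketch of such a driver, assuming a kernel of roughly this vintage — the compatible string, driver name, and callbacks are made up:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-spi-chip" },
        { }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct spi_device *spi)
{
        return 0;       /* nothing to configure in this sketch */
}

static struct spi_driver example_driver = {
        .driver = {
                .name           = "example-spi-chip",
                .owner          = THIS_MODULE,
                .of_match_table = example_of_match,
        },
        .probe = example_probe,
};

static int __init example_init(void)
{
        return spi_register_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
        spi_unregister_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");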
 
index e24a63498acb84f7e4f9f83710aca3fadefcc177..63e51b011d508c8922bb047cfb94572049323493 100644 (file)
@@ -350,7 +350,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
        spi_gpio->bitbang.master = spi_master_get(master);
        spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
 
-       if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) {
+       if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
                spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
                spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
                spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
index d31b57f7baaf3c3bf970294afd13a1396cecf46c..1dd86b835cd86a800aa8db8257e4a37422a33b8a 100644 (file)
@@ -408,11 +408,17 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
 
        xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
 
-       out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+       if (mspi->rx_dma == mspi->dma_dummy_rx)
+               out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+       else
+               out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
        out_be16(&rx_bd->cbd_datlen, 0);
        out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
 
-       out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+       if (mspi->tx_dma == mspi->dma_dummy_tx)
+               out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+       else
+               out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
        out_be16(&tx_bd->cbd_datlen, xfer_len);
        out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
                                 BD_SC_LAST);
index 9952579425b99a477c29e488243337e9372b2200..1b3060eb2921c9da66fb2f0844de4a2d1afa4ce0 100644 (file)
@@ -80,5 +80,4 @@ struct st_proto_s {
 extern long st_register(struct st_proto_s *);
 extern long st_unregister(enum proto_type);
 
-extern struct platform_device *st_get_plat_device(void);
 #endif /* ST_H */
index 063c9b1db1ab655504f5908e7c4598418dafd0fd..b85d8bfdf600ad22cd4d62636b521bfb3ac38fa5 100644 (file)
@@ -38,7 +38,6 @@
 #include "st_ll.h"
 #include "st.h"
 
-#define VERBOSE
 /* strings to be used for rfkill entries and by
  * ST Core to be used for sysfs debug entry
  */
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto)
        long err = 0;
        unsigned long flags = 0;
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        pr_info("%s(%d) ", __func__, new_proto->type);
        if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
            || new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type)
 
        pr_debug("%s: %d ", __func__, type);
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        if (type < ST_BT || type >= ST_MAX) {
                pr_err(" protocol %d not supported", type);
                return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb)
 #endif
        long len;
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        if (unlikely(skb == NULL || st_gdata == NULL
                || st_gdata->tty == NULL)) {
                pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty)
        struct st_data_s *st_gdata;
        pr_info("%s ", __func__);
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        st_gdata->tty = tty;
        tty->disc_data = st_gdata;
 
index e0c32d149f5f294878d22597bec9c1bfc05660a4..8601320a679ee8ce5dda5ea9d36f8c85798dd23e 100644 (file)
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **);
 void st_core_exit(struct st_data_s *);
 
 /* ask for reference from KIM */
-void st_kim_ref(struct st_data_s **);
+void st_kim_ref(struct st_data_s **, int);
 
 #define GPS_STUB_TEST
 #ifdef GPS_STUB_TEST
index b4a6c7fdc4e6ca59283dd3e64736c0efdfd97407..9e99463f76e8a0d048f7ec8667a455125d88cb0b 100644 (file)
@@ -72,10 +72,25 @@ const unsigned char *protocol_names[] = {
        PROTO_ENTRY(ST_GPS, "GPS"),
 };
 
+#define MAX_ST_DEVICES 3       /* Imagine 1 on each UART for now */
+struct platform_device *st_kim_devices[MAX_ST_DEVICES];
 
 /**********************************************************************/
 /* internal functions */
 
+/**
+ * st_get_plat_device -
+ *     returns a reference to the platform device requested by id.
+ *     As of now only one such device exists (id = 0). The id to request
+ *     is known to the caller: either (a) the protocol driver that is
+ *     registering, or (b) the tty device that is being opened.

+ */
+static struct platform_device *st_get_plat_device(int id)
+{
+       return st_kim_devices[id];
+}
+
 /**
  * validate_firmware_response -
  *     function to return whether the firmware response was proper
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
        struct kim_data_s       *kim_gdata;
        pr_info(" %s ", __func__);
 
-       kim_pdev = st_get_plat_device();
+       kim_pdev = st_get_plat_device(0);
        kim_gdata = dev_get_drvdata(&kim_pdev->dev);
 
        if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked)
  *     This would enable multiple such platform devices to exist
  *     on a given platform
  */
-void st_kim_ref(struct st_data_s **core_data)
+void st_kim_ref(struct st_data_s **core_data, int id)
 {
        struct platform_device  *pdev;
        struct kim_data_s       *kim_gdata;
        /* get kim_gdata reference from platform device */
-       pdev = st_get_plat_device();
+       pdev = st_get_plat_device(id);
        kim_gdata = dev_get_drvdata(&pdev->dev);
        *core_data = kim_gdata->core_data;
 }
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev)
        long *gpios = pdev->dev.platform_data;
        struct kim_data_s       *kim_gdata;
 
+       st_kim_devices[pdev->id] = pdev;
        kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
        if (!kim_gdata) {
                pr_err("no mem to allocate");
index 7e594449600e004c7f6ba14fa2dce39ebee25d20..9eed5b52d9de22647f6ca7e94791cd97f6e60122 100644 (file)
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
          If you are unsure about this, say N here.
 
 config USB_SUSPEND
-       bool "USB runtime power management (suspend/resume and wakeup)"
+       bool "USB runtime power management (autosuspend) and wakeup"
        depends on USB && PM_RUNTIME
        help
          If you say Y here, you can use driver calls or the sysfs
-         "power/level" file to suspend or resume individual USB
-         peripherals and to enable or disable autosuspend (see
+         "power/control" file to enable or disable autosuspend for
+         individual USB peripherals (see
          Documentation/usb/power-management.txt for more details).
 
          Also, USB "remote wakeup" signaling is supported, whereby some
index f06f5dbc8cdc22fbedfa463c05b18672cbb99516..1e6ccef2cf0cbd9e817e7045dccfb03918efa9d7 100644 (file)
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
 int usb_register_dev(struct usb_interface *intf,
                     struct usb_class_driver *class_driver)
 {
-       int retval = -EINVAL;
+       int retval;
        int minor_base = class_driver->minor_base;
-       int minor = 0;
+       int minor;
        char name[20];
        char *temp;
 
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
         */
        minor_base = 0;
 #endif
-       intf->minor = -1;
-
-       dbg ("looking for a minor, starting at %d", minor_base);
 
        if (class_driver->fops == NULL)
-               goto exit;
+               return -EINVAL;
+       if (intf->minor >= 0)
+               return -EADDRINUSE;
+
+       retval = init_usb_class();
+       if (retval)
+               return retval;
+
+       dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
 
        down_write(&minor_rwsem);
        for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
                        continue;
 
                usb_minors[minor] = class_driver->fops;
-
-               retval = 0;
+               intf->minor = minor;
                break;
        }
        up_write(&minor_rwsem);
-
-       if (retval)
-               goto exit;
-
-       retval = init_usb_class();
-       if (retval)
-               goto exit;
-
-       intf->minor = minor;
+       if (intf->minor < 0)
+               return -EXFULL;
 
        /* create a usb class device for this usb interface */
        snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
                                      "%s", temp);
        if (IS_ERR(intf->usb_dev)) {
                down_write(&minor_rwsem);
-               usb_minors[intf->minor] = NULL;
+               usb_minors[minor] = NULL;
+               intf->minor = -1;
                up_write(&minor_rwsem);
                retval = PTR_ERR(intf->usb_dev);
        }
-exit:
        return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
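For reference, a hypothetical caller of the reworked usb_register_dev() — the class name, minor base, and callbacks are invented; the point is only that a failure now leaves intf->minor at -1 and returns the error directly:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/usb.h>

static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
};

static struct usb_class_driver example_class = {
        .name           = "example%d",
        .fops           = &example_fops,
        .minor_base     = 192,
};

static int example_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
{
        /* On failure intf->minor stays -1, so disconnect paths that test
         * the minor do not need extra bookkeeping. */
        return usb_register_dev(intf, &example_class);
}

static void example_disconnect(struct usb_interface *intf)
{
        usb_deregister_dev(intf, &example_class);
}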
index 844683e503830485147910ff16ca035a90194d27..9f0ce7de0e366fb0066dfb92d6a6403aa5f4a2b3 100644 (file)
@@ -1802,6 +1802,7 @@ free_interfaces:
                intf->dev.groups = usb_interface_groups;
                intf->dev.dma_mask = dev->dev.dma_mask;
                INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
+               intf->minor = -1;
                device_initialize(&intf->dev);
                dev_set_name(&intf->dev, "%d-%s:%d.%d",
                        dev->bus->busnum, dev->devpath,
index 59dc3d351b60269f3c3872b859c0b39d7fcbe449..5ab5bb89bae3558cfe6f296e37931680afa0e323 100644 (file)
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
                                index, transmit ? 'T' : 'R', cppi_ch);
        cppi_ch->hw_ep = ep;
        cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+       cppi_ch->channel.max_len = 0x7fffffff;
 
        DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
        return &cppi_ch->channel;
index 6fca870e957ed3b5061f19deec76a9472d6d5211..d065e23f123ee755f9e6fe0e661823f48d917727 100644 (file)
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #ifndef        CONFIG_MUSB_PIO_ONLY
        if (is_dma_capable() && musb_ep->dma) {
                struct dma_controller   *c = musb->dma_controller;
+               size_t request_size;
+
+               /* setup DMA, then program endpoint CSR */
+               request_size = min_t(size_t, request->length - request->actual,
+                                       musb_ep->dma->max_len);
 
                use_dma = (request->dma != DMA_ADDR_INVALID);
 
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
 
 #ifdef CONFIG_USB_INVENTRA_DMA
                {
-                       size_t request_size;
-
-                       /* setup DMA, then program endpoint CSR */
-                       request_size = min_t(size_t, request->length,
-                                               musb_ep->dma->max_len);
                        if (request_size < musb_ep->packet_sz)
                                musb_ep->dma->desired_mode = 0;
                        else
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
                use_dma = use_dma && c->channel_program(
                                musb_ep->dma, musb_ep->packet_sz,
                                0,
-                               request->dma,
-                               request->length);
+                               request->dma + request->actual,
+                               request_size);
                if (!use_dma) {
                        c->channel_release(musb_ep->dma);
                        musb_ep->dma = NULL;
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
                use_dma = use_dma && c->channel_program(
                                musb_ep->dma, musb_ep->packet_sz,
                                request->zero,
-                               request->dma,
-                               request->length);
+                               request->dma + request->actual,
+                               request_size);
 #endif
        }
 #endif
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                                request->zero = 0;
                        }
 
-                       /* ... or if not, then complete it. */
-                       musb_g_giveback(musb_ep, request, 0);
-
-                       /*
-                        * Kickstart next transfer if appropriate;
-                        * the packet that just completed might not
-                        * be transmitted for hours or days.
-                        * REVISIT for double buffering...
-                        * FIXME revisit for stalls too...
-                        */
-                       musb_ep_select(mbase, epnum);
-                       csr = musb_readw(epio, MUSB_TXCSR);
-                       if (csr & MUSB_TXCSR_FIFONOTEMPTY)
-                               return;
-
-                       request = musb_ep->desc ? next_request(musb_ep) : NULL;
-                       if (!request) {
-                               DBG(4, "%s idle now\n",
-                                       musb_ep->end_point.name);
-                               return;
+                       if (request->actual == request->length) {
+                               musb_g_giveback(musb_ep, request, 0);
+                               request = musb_ep->desc ? next_request(musb_ep) : NULL;
+                               if (!request) {
+                                       DBG(4, "%s idle now\n",
+                                               musb_ep->end_point.name);
+                                       return;
+                               }
                        }
                }
 
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 {
        const u8                epnum = req->epnum;
        struct usb_request      *request = &req->request;
-       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
+       struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        unsigned                fifo_count = 0;
-       u16                     len = musb_ep->packet_sz;
+       u16                     len;
        u16                     csr = musb_readw(epio, MUSB_RXCSR);
+       struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
+
+       if (hw_ep->is_shared_fifo)
+               musb_ep = &hw_ep->ep_in;
+       else
+               musb_ep = &hw_ep->ep_out;
+
+       len = musb_ep->packet_sz;
 
        /* We shouldn't get here while DMA is active, but we do... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
         */
 
                                csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
                                csr |= MUSB_RXCSR_AUTOCLEAR;
+#ifdef USE_MODE1
                                /* csr |= MUSB_RXCSR_DMAMODE; */
 
                                /* this special sequence (enabling and then
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                                if (request->actual < request->length) {
                                        int transfer_size = 0;
 #ifdef USE_MODE1
-                                       transfer_size = min(request->length,
+                                       transfer_size = min(request->length - request->actual,
                                                        channel->max_len);
 #else
-                                       transfer_size = len;
+                                       transfer_size = min(request->length - request->actual,
+                                                       (unsigned)len);
 #endif
                                        if (transfer_size <= musb_ep->packet_sz)
                                                musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
        u16                     csr;
        struct usb_request      *request;
        void __iomem            *mbase = musb->mregs;
-       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
+       struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;
+       struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
+
+       if (hw_ep->is_shared_fifo)
+               musb_ep = &hw_ep->ep_in;
+       else
+               musb_ep = &hw_ep->ep_out;
 
        musb_ep_select(mbase, epnum);
 
@@ -1081,7 +1084,7 @@ struct free_record {
 /*
  * Context: controller locked, IRQs blocked.
  */
-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
 {
        DBG(3, "<== %s request %p len %u on hw_ep%d\n",
                req->tx ? "TX/IN" : "RX/OUT",
index c8b140325d82bf4bb6cd3ad85216fa24ec308ce7..572b1da7f2dc45ea1fd3d9bb67d3ea273d56cfae 100644 (file)
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
 
 extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
 
+extern void musb_ep_restart(struct musb *, struct musb_request *);
+
 #endif         /* __MUSB_GADGET_H */
index 59bef8f3a3585100310bbb43848ea56603a82c28..6dd03f4c5f4956983c2d1bf2552c248f6a1702a0 100644 (file)
@@ -261,6 +261,7 @@ __acquires(musb->lock)
                                        ctrlrequest->wIndex & 0x0f;
                                struct musb_ep          *musb_ep;
                                struct musb_hw_ep       *ep;
+                               struct musb_request     *request;
                                void __iomem            *regs;
                                int                     is_in;
                                u16                     csr;
@@ -302,6 +303,14 @@ __acquires(musb->lock)
                                        musb_writew(regs, MUSB_RXCSR, csr);
                                }
 
+                               /* Maybe start the first request in the queue */
+                               request = to_musb_request(
+                                               next_request(musb_ep));
+                               if (!musb_ep->busy && request) {
+                                       DBG(3, "restarting the request\n");
+                                       musb_ep_restart(musb, request);
+                               }
+
                                /* select ep0 again */
                                musb_ep_select(mbase, 0);
                                } break;
index 877d20b1dff973fd6975f258bb0b18267df1f5f5..9e65c47cc98b95761daba7186dd180550b954def 100644 (file)
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
 
        qh->segsize = length;
 
+       /*
+        * Ensure the data reaches to main memory before starting
+        * DMA transfer
+        */
+       wmb();
+
        if (!dma->channel_program(channel, pkt_size, mode,
                        urb->transfer_dma + offset, length)) {
                dma->channel_release(channel);
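The wmb() added above is the usual ordering pattern for descriptor-based DMA: CPU stores that fill the buffer must reach memory before the controller is told to fetch it. A generic, hypothetical illustration of the pattern — 'desc' and the doorbell register are placeholders, not MUSB symbols:

#include <asm/barrier.h>
#include <linux/io.h>
#include <linux/types.h>

static void kick_dma(u32 *desc, u32 len, void __iomem *doorbell)
{
        desc[0] = len;          /* CPU fills the descriptor in memory ...      */
        wmb();                  /* ... ordered before the doorbell write ...   */
        writel(1, doorbell);    /* ... so the device never fetches stale data. */
}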
index 29e850a7a2f9871b7c9658e0717f8926dc5bc4ac..7c8008225ee3a47ebef2cecaf005c0631fa93da1 100644 (file)
@@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
        int r, nlogs = 0;
 
        while (datalen > 0) {
-               if (unlikely(headcount >= VHOST_NET_MAX_SG)) {
+               if (unlikely(seg >= VHOST_NET_MAX_SG)) {
                        r = -ENOBUFS;
                        goto err;
                }
index c579dcc9200ccabe0f1bcf79afed59577e4a6f38..dd3d6f7406f80092cdd8843acdd06c313e9cf164 100644 (file)
@@ -858,11 +858,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                if (r < 0)
                        return r;
                len -= l;
-               if (!len)
+               if (!len) {
+                       if (vq->log_ctx)
+                               eventfd_signal(vq->log_ctx, 1);
                        return 0;
+               }
        }
-       if (vq->log_ctx)
-               eventfd_signal(vq->log_ctx, 1);
        /* Length written exceeds what we have stored. This is a bug. */
        BUG();
        return 0;
index 84f842331dfae0d7c4b194b42f283fae44e8a6af..7ccc967831f05bca5aba179bb88ff7880f55b92f 100644 (file)
@@ -3508,7 +3508,7 @@ static void fbcon_exit(void)
        softback_buf = 0UL;
 
        for (i = 0; i < FB_MAX; i++) {
-               int pending;
+               int pending = 0;
 
                mapped = 0;
                info = registered_fb[i];
@@ -3516,7 +3516,8 @@ static void fbcon_exit(void)
                if (info == NULL)
                        continue;
 
-               pending = cancel_work_sync(&info->queue);
+               if (info->queue.func)
+                       pending = cancel_work_sync(&info->queue);
                DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
                        "no"));
 
index 815f84b07933f7b97dc1fab249055531f6cc65b2..70477c2e4b619cd6a2ca5562771e15243cd647a3 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
 #include <linux/dmi.h>
-
+#include <linux/pci.h>
 #include <video/vga.h>
 
 static struct fb_var_screeninfo efifb_defined __devinitdata = {
@@ -39,17 +39,31 @@ enum {
        M_I20,          /* 20-Inch iMac */
        M_I20_SR,       /* 20-Inch iMac (Santa Rosa) */
        M_I24,          /* 24-Inch iMac */
+       M_I24_8_1,      /* 24-Inch iMac, 8,1th gen */
+       M_I24_10_1,     /* 24-Inch iMac, 10,1th gen */
+       M_I27_11_1,     /* 27-Inch iMac, 11,1th gen */
        M_MINI,         /* Mac Mini */
+       M_MINI_3_1,     /* Mac Mini, 3,1th gen */
+       M_MINI_4_1,     /* Mac Mini, 4,1th gen */
        M_MB,           /* MacBook */
        M_MB_2,         /* MacBook, 2nd rev. */
        M_MB_3,         /* MacBook, 3rd rev. */
+       M_MB_5_1,       /* MacBook, 5th rev. */
+       M_MB_6_1,       /* MacBook, 6th rev. */
+       M_MB_7_1,       /* MacBook, 7th rev. */
        M_MB_SR,        /* MacBook, 2nd gen, (Santa Rosa) */
        M_MBA,          /* MacBook Air */
        M_MBP,          /* MacBook Pro */
        M_MBP_2,        /* MacBook Pro 2nd gen */
+       M_MBP_2_2,      /* MacBook Pro 2,2nd gen */
        M_MBP_SR,       /* MacBook Pro (Santa Rosa) */
        M_MBP_4,        /* MacBook Pro, 4th gen */
        M_MBP_5_1,    /* MacBook Pro, 5,1th gen */
+       M_MBP_5_2,      /* MacBook Pro, 5,2th gen */
+       M_MBP_5_3,      /* MacBook Pro, 5,3rd gen */
+       M_MBP_6_1,      /* MacBook Pro, 6,1th gen */
+       M_MBP_6_2,      /* MacBook Pro, 6,2th gen */
+       M_MBP_7_1,      /* MacBook Pro, 7,1th gen */
        M_UNKNOWN       /* placeholder */
 };
 
@@ -64,14 +78,28 @@ static struct efifb_dmi_info {
        [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
        [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
        [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
+       [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
+       [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
+       [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
        [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
+       [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
+       [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
        [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
+       [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
+       [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
+       [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
        [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
        [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
        [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
+       [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
        [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
        [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
        [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
+       [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
+       [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
+       [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
+       [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
+       [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
        [M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
 };
 
@@ -92,7 +120,12 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
        /* At least one of these two will be right; maybe both? */
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
+       EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
        {},
 };
 
@@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id)
 {
        struct efifb_dmi_info *info = id->driver_data;
        if (info->base == 0)
-               return -ENODEV;
+               return 0;
 
        printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
                         "(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id)
                         info->stride);
 
        /* Trust the bootloader over the DMI tables */
-       if (screen_info.lfb_base == 0)
+       if (screen_info.lfb_base == 0) {
+#if defined(CONFIG_PCI)
+               struct pci_dev *dev = NULL;
+               int found_bar = 0;
+#endif
                screen_info.lfb_base = info->base;
-       if (screen_info.lfb_linelength == 0)
-               screen_info.lfb_linelength = info->stride;
-       if (screen_info.lfb_width == 0)
-               screen_info.lfb_width = info->width;
-       if (screen_info.lfb_height == 0)
-               screen_info.lfb_height = info->height;
-       if (screen_info.orig_video_isVGA == 0)
-               screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
 
-       return 0;
+#if defined(CONFIG_PCI)
+               /* make sure that the address in the table is actually on a
+                * VGA device's PCI BAR */
+
+               for_each_pci_dev(dev) {
+                       int i;
+                       if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+                               continue;
+                       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+                               resource_size_t start, end;
+
+                               start = pci_resource_start(dev, i);
+                               if (start == 0)
+                                       break;
+                               end = pci_resource_end(dev, i);
+                               if (screen_info.lfb_base >= start &&
+                                               screen_info.lfb_base < end) {
+                                       found_bar = 1;
+                               }
+                       }
+               }
+               if (!found_bar)
+                       screen_info.lfb_base = 0;
+#endif
+       }
+       if (screen_info.lfb_base) {
+               if (screen_info.lfb_linelength == 0)
+                       screen_info.lfb_linelength = info->stride;
+               if (screen_info.lfb_width == 0)
+                       screen_info.lfb_width = info->width;
+               if (screen_info.lfb_height == 0)
+                       screen_info.lfb_height = info->height;
+               if (screen_info.orig_video_isVGA == 0)
+                       screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
+       } else {
+               screen_info.lfb_linelength = 0;
+               screen_info.lfb_width = 0;
+               screen_info.lfb_height = 0;
+               screen_info.orig_video_isVGA = 0;
+               return 0;
+       }
+       return 1;
 }
 
 static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
index 5d786bd3e304f1a174e682461d7d94a265ae2119..a31a77ff6f3d2a3ac2daa56baeb6ae55430f6177 100644 (file)
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
         * Set bit to enable graphics DMA.
         */
        x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
-       x |= fbi->active ? 0x00000100 : 0;
-       fbi->active = 0;
+       x &= ~CFG_GRA_ENA_MASK;
+       x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
 
        /*
         * If we are in a pseudo-color mode, we need to enable
index 559bf1727a2b8f252a193d6fac276aed56124428..b52f8e4ef1fdbe3cd19c70d4fb282d15f5b99f85 100644 (file)
@@ -1701,6 +1701,9 @@ static int        sisfb_ioctl(struct fb_info *info, unsigned int cmd,
                break;
 
           case FBIOGET_VBLANK:
+
+               memset(&sisvbblank, 0, sizeof(struct fb_vblank));
+
                sisvbblank.count = 0;
                sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
 
index 29bac5118877ef2028a781e6b409e2fe36463a99..d409495876f11b24fabaebc01d57f02224fe459f 100644 (file)
@@ -755,7 +755,10 @@ int register_xenstore_notifier(struct notifier_block *nb)
 {
        int ret = 0;
 
-       blocking_notifier_chain_register(&xenstore_chain, nb);
+       if (xenstored_ready > 0)
+               ret = nb->notifier_call(nb, 0, NULL);
+       else
+               blocking_notifier_chain_register(&xenstore_chain, nb);
 
        return ret;
 }
@@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 
 void xenbus_probe(struct work_struct *unused)
 {
-       BUG_ON((xenstored_ready <= 0));
+       xenstored_ready = 1;
 
        /* Enumerate devices in xenstore and watch for changes. */
        xenbus_probe_devices(&xenbus_frontend);
@@ -835,8 +838,8 @@ static int __init xenbus_init(void)
                        xen_store_evtchn = xen_start_info->store_evtchn;
                        xen_store_mfn = xen_start_info->store_mfn;
                        xen_store_interface = mfn_to_virt(xen_store_mfn);
+                       xenstored_ready = 1;
                }
-               xenstored_ready = 1;
        }
 
        /* Initialize the interface to xenstore. */
index 1320b2a05fb2aaed4b261965683d814855abefe6..250b0a73c8a8ca92b78c3a0282d2425ce7649dcf 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -712,8 +712,16 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
         */
        ret = retry(iocb);
 
-       if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
+       if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
+               /*
+                * There's no easy way to restart the syscall since other AIO's
+                * may be already running. Just fail this IO with EINTR.
+                */
+               if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
+                            ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
+                       ret = -EINTR;
                aio_complete(iocb, ret, 0);
+       }
 out:
        spin_lock_irq(&ctx->ctx_lock);
 
index c65c3419dd3703f12bb4994e9333c085c907ecfa..7e83b356cc9e3a93c2bc1b0e915d118884170474 100644 (file)
@@ -232,7 +232,7 @@ static int
 small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
                void **request_buf)
 {
-       int rc = 0;
+       int rc;
 
        rc = cifs_reconnect_tcon(tcon, smb_command);
        if (rc)
@@ -250,7 +250,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
        if (tcon != NULL)
                cifs_stats_inc(&tcon->num_smbs_sent);
 
-       return rc;
+       return 0;
 }
 
 int
@@ -281,16 +281,9 @@ small_smb_init_no_tc(const int smb_command, const int wct,
 
 /* If the return code is zero, this function must fill in request_buf pointer */
 static int
-smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
-        void **request_buf /* returned */ ,
-        void **response_buf /* returned */ )
+__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+                       void **request_buf, void **response_buf)
 {
-       int rc = 0;
-
-       rc = cifs_reconnect_tcon(tcon, smb_command);
-       if (rc)
-               return rc;
-
        *request_buf = cifs_buf_get();
        if (*request_buf == NULL) {
                /* BB should we add a retry in here if not a writepage? */
@@ -309,7 +302,31 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
        if (tcon != NULL)
                cifs_stats_inc(&tcon->num_smbs_sent);
 
-       return rc;
+       return 0;
+}
+
+/* If the return code is zero, this function must fill in request_buf pointer */
+static int
+smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+        void **request_buf, void **response_buf)
+{
+       int rc;
+
+       rc = cifs_reconnect_tcon(tcon, smb_command);
+       if (rc)
+               return rc;
+
+       return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
+}
+
+static int
+smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
+                       void **request_buf, void **response_buf)
+{
+       if (tcon->ses->need_reconnect || tcon->need_reconnect)
+               return -EHOSTDOWN;
+
+       return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
 }
 
 static int validate_t2(struct smb_t2_rsp *pSMB)
@@ -4534,8 +4551,8 @@ CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon)
 
        cFYI(1, "In QFSUnixInfo");
 QFSUnixRetry:
-       rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                     (void **) &pSMBr);
+       rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+                                  (void **) &pSMB, (void **) &pSMBr);
        if (rc)
                return rc;
 
@@ -4604,8 +4621,8 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
        cFYI(1, "In SETFSUnixInfo");
 SETFSUnixRetry:
        /* BB switch to small buf init to save memory */
-       rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                     (void **) &pSMBr);
+       rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+                                       (void **) &pSMB, (void **) &pSMBr);
        if (rc)
                return rc;
 
index 93f77d438d3c8f3d6702d24486c4e0f64055796b..53cce8cc2224f4abe4754d2cc320a05a6f36d215 100644 (file)
@@ -801,6 +801,8 @@ retry_iget5_locked:
                        inode->i_flags |= S_NOATIME | S_NOCMTIME;
                if (inode->i_state & I_NEW) {
                        inode->i_ino = hash;
+                       if (S_ISREG(inode->i_mode))
+                               inode->i_data.backing_dev_info = sb->s_bdi;
 #ifdef CONFIG_CIFS_FSCACHE
                        /* initialize per-inode cache cookie pointer */
                        CIFS_I(inode)->fscache = NULL;
index 718c7062aec129844cda361e2f4b8a143aa31861..0644a154672b93012a4d7a7f864ba7f505b43213 100644 (file)
@@ -1153,7 +1153,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
 {
        compat_ssize_t tot_len;
        struct iovec iovstack[UIO_FASTIOV];
-       struct iovec *iov;
+       struct iovec *iov = iovstack;
        ssize_t ret;
        io_fn_t fn;
        iov_fn_t fnv;
index 5581122bd2c00cd1f8727436c531bb5245b03b0a..ab38fef1c9a1a52eab7128fa6a7dad217d4ad744 100644 (file)
@@ -72,22 +72,11 @@ int writeback_in_progress(struct backing_dev_info *bdi)
 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
 {
        struct super_block *sb = inode->i_sb;
-       struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
 
-       /*
-        * For inodes on standard filesystems, we use superblock's bdi. For
-        * inodes on virtual filesystems, we want to use inode mapping's bdi
-        * because they can possibly point to something useful (think about
-        * block_dev filesystem).
-        */
-       if (sb->s_bdi && sb->s_bdi != &noop_backing_dev_info) {
-               /* Some device inodes could play dirty tricks. Catch them... */
-               WARN(bdi != sb->s_bdi && bdi_cap_writeback_dirty(bdi),
-                       "Dirtiable inode bdi %s != sb bdi %s\n",
-                       bdi->name, sb->s_bdi->name);
-               return sb->s_bdi;
-       }
-       return bdi;
+       if (strcmp(sb->s_type->name, "bdev") == 0)
+               return inode->i_mapping->backing_dev_info;
+
+       return sb->s_bdi;
 }
 
 static void bdi_queue_work(struct backing_dev_info *bdi,
index d367af1514efe696b50a73374fffe77784c65b6a..cde755cca5642d41fb9cbbe05ac2d01f70f53c69 100644 (file)
@@ -1354,7 +1354,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        loff_t file_size;
        unsigned int num;
        unsigned int offset;
-       size_t total_len;
+       size_t total_len = 0;
 
        req = fuse_get_req(fc);
        if (IS_ERR(req))
index a76e0aa5cd3fc5188a392639618858a7e7e34092..391915093fe1c494a58ec8feae54cd576fd2464a 100644 (file)
@@ -209,7 +209,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
        }
 
        inode->i_mode = new_mode;
+       inode->i_ctime = CURRENT_TIME;
        di->i_mode = cpu_to_le16(inode->i_mode);
+       di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+       di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 
        ocfs2_journal_dirty(handle, di_bh);
 
index 1361997cf205d132a04ddeb87760aa233c3e707d..cbe2f057cc2826cb6af4ed833319fe27c4bd565f 100644 (file)
@@ -977,7 +977,7 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
 int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
                           size_t caller_veclen, u8 target_node, int *status)
 {
-       int ret;
+       int ret = 0;
        struct o2net_msg *msg = NULL;
        size_t veclen, caller_bytes = 0;
        struct kvec *vec = NULL;
index f04ebcfffc4a5e1516c2a7307ffa8032db8e19d7..c49f6de0e7abb6e096ddc56e795957e0194bb8dd 100644 (file)
@@ -3931,6 +3931,15 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
                goto out_commit;
        }
 
+       cpos = split_hash;
+       ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
+                                      data_ac, meta_ac, new_dx_leaves,
+                                      num_dx_leaves);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_commit;
+       }
+
        for (i = 0; i < num_dx_leaves; i++) {
                ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
                                              orig_dx_leaves[i],
@@ -3939,15 +3948,14 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
                        mlog_errno(ret);
                        goto out_commit;
                }
-       }
 
-       cpos = split_hash;
-       ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
-                                      data_ac, meta_ac, new_dx_leaves,
-                                      num_dx_leaves);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
+               ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
+                                             new_dx_leaves[i],
+                                             OCFS2_JOURNAL_ACCESS_WRITE);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out_commit;
+               }
        }
 
        ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
index 4b6ae2c13b47a85c6a31f6f7d2a1072099354935..765298908f1d3905bbabf3c5e597a8bf72ebd4a9 100644 (file)
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res);
 void dlm_clean_master_list(struct dlm_ctxt *dlm,
                           u8 dead_node);
+void dlm_force_free_mles(struct dlm_ctxt *dlm);
 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
 int __dlm_lockres_unused(struct dlm_lock_resource *res);
index 5efdd37dfe484f2f6207ac2c7c6927fda0cc1ea0..901ca52bf86b293639ea39c8e1f460b66bc1f265 100644 (file)
@@ -636,8 +636,14 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
        spin_lock(&dlm->track_lock);
        if (oldres)
                track_list = &oldres->tracking;
-       else
+       else {
                track_list = &dlm->tracking_list;
+               if (list_empty(track_list)) {
+                       dl = NULL;
+                       spin_unlock(&dlm->track_lock);
+                       goto bail;
+               }
+       }
 
        list_for_each_entry(res, track_list, tracking) {
                if (&res->tracking == &dlm->tracking_list)
@@ -660,6 +666,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
        } else
                dl = NULL;
 
+bail:
        /* passed to seq_show */
        return dl;
 }
index 153abb5abef024d2ca63f6d4a23ee1c126b89f13..11a5c87fd7f7c00de41c61c00fae625e46675973 100644 (file)
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 
                dlm_mark_domain_leaving(dlm);
                dlm_leave_domain(dlm);
+               dlm_force_free_mles(dlm);
                dlm_complete_dlm_shutdown(dlm);
        }
        dlm_put(dlm);
index ffb4c68dafa495bc739165eb30f718f1eb0865ec..f564b0e5f80d8c89e08eeba4a5e133b24cb67373 100644 (file)
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
        wake_up(&res->wq);
        wake_up(&dlm->migration_wq);
 }
+
+void dlm_force_free_mles(struct dlm_ctxt *dlm)
+{
+       int i;
+       struct hlist_head *bucket;
+       struct dlm_master_list_entry *mle;
+       struct hlist_node *tmp, *list;
+
+       /*
+        * We notified all other nodes that we are exiting the domain and
+        * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
+        * around we force free them and wake any processes that are waiting
+        * on the mles
+        */
+       spin_lock(&dlm->spinlock);
+       spin_lock(&dlm->master_lock);
+
+       BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
+       BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
+
+       for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+               bucket = dlm_master_hash(dlm, i);
+               hlist_for_each_safe(list, tmp, bucket) {
+                       mle = hlist_entry(list, struct dlm_master_list_entry,
+                                         master_hash_node);
+                       if (mle->type != DLM_MLE_BLOCK) {
+                               mlog(ML_ERROR, "bad mle: %p\n", mle);
+                               dlm_print_one_mle(mle);
+                       }
+                       atomic_set(&mle->woken, 1);
+                       wake_up(&mle->wq);
+
+                       __dlm_unlink_mle(dlm, mle);
+                       __dlm_mle_detach_hb_events(dlm, mle);
+                       __dlm_put_mle(mle);
+               }
+       }
+       spin_unlock(&dlm->master_lock);
+       spin_unlock(&dlm->spinlock);
+}
index d1ce48e1b3d6029e5861cf2863cbb0a544511b40..1d596d8c4a4a55dfd185ecaff4d7d9900bf10b24 100644 (file)
@@ -84,6 +84,7 @@ enum {
        OI_LS_PARENT,
        OI_LS_RENAME1,
        OI_LS_RENAME2,
+       OI_LS_REFLINK_TARGET,
 };
 
 int ocfs2_dlm_init(struct ocfs2_super *osb);
index 33f1c9a8258d1d4de5a17fe1ebf1d0d39c4098c3..fa31d05e41b7e558fa24df43682dbd107b568c76 100644 (file)
 #define OCFS2_HAS_REFCOUNT_FL   (0x0010)
 
 /* Inode attributes, keep in sync with EXT2 */
-#define OCFS2_SECRM_FL         (0x00000001)    /* Secure deletion */
-#define OCFS2_UNRM_FL          (0x00000002)    /* Undelete */
-#define OCFS2_COMPR_FL         (0x00000004)    /* Compress file */
-#define OCFS2_SYNC_FL          (0x00000008)    /* Synchronous updates */
-#define OCFS2_IMMUTABLE_FL     (0x00000010)    /* Immutable file */
-#define OCFS2_APPEND_FL                (0x00000020)    /* writes to file may only append */
-#define OCFS2_NODUMP_FL                (0x00000040)    /* do not dump file */
-#define OCFS2_NOATIME_FL       (0x00000080)    /* do not update atime */
-#define OCFS2_DIRSYNC_FL       (0x00010000)    /* dirsync behaviour (directories only) */
-
-#define OCFS2_FL_VISIBLE       (0x000100FF)    /* User visible flags */
-#define OCFS2_FL_MODIFIABLE    (0x000100FF)    /* User modifiable flags */
+#define OCFS2_SECRM_FL                 FS_SECRM_FL     /* Secure deletion */
+#define OCFS2_UNRM_FL                  FS_UNRM_FL      /* Undelete */
+#define OCFS2_COMPR_FL                 FS_COMPR_FL     /* Compress file */
+#define OCFS2_SYNC_FL                  FS_SYNC_FL      /* Synchronous updates */
+#define OCFS2_IMMUTABLE_FL             FS_IMMUTABLE_FL /* Immutable file */
+#define OCFS2_APPEND_FL                        FS_APPEND_FL    /* writes to file may only append */
+#define OCFS2_NODUMP_FL                        FS_NODUMP_FL    /* do not dump file */
+#define OCFS2_NOATIME_FL               FS_NOATIME_FL   /* do not update atime */
+/* Reserved for compression usage... */
+#define OCFS2_DIRTY_FL                 FS_DIRTY_FL
+#define OCFS2_COMPRBLK_FL              FS_COMPRBLK_FL  /* One or more compressed clusters */
+#define OCFS2_NOCOMP_FL                        FS_NOCOMP_FL    /* Don't compress */
+#define OCFS2_ECOMPR_FL                        FS_ECOMPR_FL    /* Compression error */
+/* End compression flags --- maybe not all used */
+#define OCFS2_BTREE_FL                 FS_BTREE_FL     /* btree format dir */
+#define OCFS2_INDEX_FL                 FS_INDEX_FL     /* hash-indexed directory */
+#define OCFS2_IMAGIC_FL                        FS_IMAGIC_FL    /* AFS directory */
+#define OCFS2_JOURNAL_DATA_FL          FS_JOURNAL_DATA_FL /* Reserved for ext3 */
+#define OCFS2_NOTAIL_FL                        FS_NOTAIL_FL    /* file tail should not be merged */
+#define OCFS2_DIRSYNC_FL               FS_DIRSYNC_FL   /* dirsync behaviour (directories only) */
+#define OCFS2_TOPDIR_FL                        FS_TOPDIR_FL    /* Top of directory hierarchies*/
+#define OCFS2_RESERVED_FL              FS_RESERVED_FL  /* reserved for ext2 lib */
+
+#define OCFS2_FL_VISIBLE               FS_FL_USER_VISIBLE      /* User visible flags */
+#define OCFS2_FL_MODIFIABLE            FS_FL_USER_MODIFIABLE   /* User modifiable flags */
 
 /*
  * Extent record flags (e_node.leaf.flags)
index 2d3420af1a839e0e13626a34c289cd2227bf095a..5d241505690b71e2ec0b259cbc39f1ed46d683d5 100644 (file)
 /*
  * ioctl commands
  */
-#define OCFS2_IOC_GETFLAGS     _IOR('f', 1, long)
-#define OCFS2_IOC_SETFLAGS     _IOW('f', 2, long)
-#define OCFS2_IOC32_GETFLAGS   _IOR('f', 1, int)
-#define OCFS2_IOC32_SETFLAGS   _IOW('f', 2, int)
+#define OCFS2_IOC_GETFLAGS     FS_IOC_GETFLAGS
+#define OCFS2_IOC_SETFLAGS     FS_IOC_SETFLAGS
+#define OCFS2_IOC32_GETFLAGS   FS_IOC32_GETFLAGS
+#define OCFS2_IOC32_SETFLAGS   FS_IOC32_SETFLAGS
 
 /*
  * Space reservation / allocation / free ioctls and argument structure
index 0afeda83120fa0e54bd2c9cc7e1f4f08c6e836bf..efdd7560740655beba6f53dccef235b5ce89cdbf 100644 (file)
@@ -4201,8 +4201,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
                goto out;
        }
 
-       mutex_lock(&new_inode->i_mutex);
-       ret = ocfs2_inode_lock(new_inode, &new_bh, 1);
+       mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
+       ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
+                                     OI_LS_REFLINK_TARGET);
        if (ret) {
                mlog_errno(ret);
                goto out_unlock;
index d8b6e4259b80022cb824326f4ce2c665089b5818..3e78db361bc70b3ffc327a6f21b6d4580305d0eb 100644 (file)
@@ -732,25 +732,23 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
                           struct ocfs2_alloc_reservation *resv,
                           int *cstart, int *clen)
 {
-       unsigned int wanted = *clen;
-
        if (resv == NULL || ocfs2_resmap_disabled(resmap))
                return -ENOSPC;
 
        spin_lock(&resv_lock);
 
-       /*
-        * We don't want to over-allocate for temporary
-        * windows. Otherwise, we run the risk of fragmenting the
-        * allocation space.
-        */
-       wanted = ocfs2_resv_window_bits(resmap, resv);
-       if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
-               wanted = *clen;
-
        if (ocfs2_resv_empty(resv)) {
-               mlog(0, "empty reservation, find new window\n");
+               /*
+                * We don't want to over-allocate for temporary
+                * windows. Otherwise, we run the risk of fragmenting the
+                * allocation space.
+                */
+               unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);
 
+               if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
+                       wanted = *clen;
+
+               mlog(0, "empty reservation, find new window\n");
                /*
                 * Try to get a window here. If it works, we must fall
                 * through and test the bitmap. This avoids some
index 8a286f54dca1f30a1d65b0d6cbb5baa0fa8c063c..849c2f0e0a0e664e983adf61f620abedf87fcb72 100644 (file)
@@ -357,7 +357,7 @@ out:
 static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
                                          struct ocfs2_group_desc *bg,
                                          struct ocfs2_chain_list *cl,
-                                         u64 p_blkno, u32 clusters)
+                                         u64 p_blkno, unsigned int clusters)
 {
        struct ocfs2_extent_list *el = &bg->bg_list;
        struct ocfs2_extent_rec *rec;
@@ -369,7 +369,7 @@ static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
        rec->e_blkno = cpu_to_le64(p_blkno);
        rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
                                  le16_to_cpu(cl->cl_bpc));
-       rec->e_leaf_clusters = cpu_to_le32(clusters);
+       rec->e_leaf_clusters = cpu_to_le16(clusters);
        le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
        le16_add_cpu(&bg->bg_free_bits_count,
                     clusters * le16_to_cpu(cl->cl_bpc));
index 32499d213fc4f80f37efde6f71196283236896bf..9975457c981f904ca18a512dd7bcbe0f092ac664 100644 (file)
@@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
        }
 
        /* Fast symlinks can't be large */
-       len = strlen(target);
+       len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
        link = kzalloc(len + 1, GFP_NOFS);
        if (!link) {
                status = -ENOMEM;
index d03469f618012ea4aff64b62f1f77e3e8b8ec47d..06fa5e77c40ef7aa0a3c0483ddbf3b9dd59f9dcf 100644 (file)
@@ -1286,13 +1286,11 @@ int ocfs2_xattr_get_nolock(struct inode *inode,
        xis.inode_bh = xbs.inode_bh = di_bh;
        di = (struct ocfs2_dinode *)di_bh->b_data;
 
-       down_read(&oi->ip_xattr_sem);
        ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
                                    buffer_size, &xis);
        if (ret == -ENODATA && di->i_xattr_loc)
                ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
                                            buffer_size, &xbs);
-       up_read(&oi->ip_xattr_sem);
 
        return ret;
 }
@@ -1316,8 +1314,10 @@ static int ocfs2_xattr_get(struct inode *inode,
                mlog_errno(ret);
                return ret;
        }
+       down_read(&OCFS2_I(inode)->ip_xattr_sem);
        ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
                                     name, buffer, buffer_size);
+       up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
        ocfs2_inode_unlock(inode, 0);
 
index a1c43e7c8a7be4ce70c729f3338eae09097f4025..8e4addaa542458badbe0e62da644ec1c64194f99 100644 (file)
@@ -2675,7 +2675,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        INF("auxv",       S_IRUSR, proc_pid_auxv),
        ONE("status",     S_IRUGO, proc_pid_status),
        ONE("personality", S_IRUSR, proc_pid_personality),
-       INF("limits",     S_IRUSR, proc_pid_limits),
+       INF("limits",     S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
        REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
@@ -3011,7 +3011,7 @@ static const struct pid_entry tid_base_stuff[] = {
        INF("auxv",      S_IRUSR, proc_pid_auxv),
        ONE("status",    S_IRUGO, proc_pid_status),
        ONE("personality", S_IRUSR, proc_pid_personality),
-       INF("limits",    S_IRUSR, proc_pid_limits),
+       INF("limits",    S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
        REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
index 271afc48b9a5d58dd2874d41f8a08dfcae644481..1dbca4e8cc164d08276d4a1f5a875e7726fc7020 100644 (file)
@@ -363,13 +363,13 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        mss->referenced += PAGE_SIZE;
                mapcount = page_mapcount(page);
                if (mapcount >= 2) {
-                       if (pte_dirty(ptent))
+                       if (pte_dirty(ptent) || PageDirty(page))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
                } else {
-                       if (pte_dirty(ptent))
+                       if (pte_dirty(ptent) || PageDirty(page))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
index 91c817ff02c3847a874e1e99f7638b1148869a77..2367fb3f70bc6ba468ab0ccb628b5b88fabac955 100644 (file)
@@ -163,7 +163,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
 
 static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
-       .llseek         = generic_file_llseek,
+       .llseek         = default_llseek,
 };
 
 static struct vmcore* __init get_new_element(void)
index f53505de071217399e39bf2013304ba46bd0c7f5..5cbb81e134aca031b21e3c88661400ff1c1b174f 100644 (file)
@@ -170,6 +170,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
 int reiserfs_unpack(struct inode *inode, struct file *filp)
 {
        int retval = 0;
+       int depth;
        int index;
        struct page *page;
        struct address_space *mapping;
@@ -188,8 +189,8 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
        /* we need to make sure nobody is changing the file size beneath
         ** us
         */
-       mutex_lock(&inode->i_mutex);
-       reiserfs_write_lock(inode->i_sb);
+       reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+       depth = reiserfs_write_lock_once(inode->i_sb);
 
        write_from = inode->i_size & (blocksize - 1);
        /* if we are on a block boundary, we are already unpacked.  */
@@ -224,6 +225,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
 
       out:
        mutex_unlock(&inode->i_mutex);
-       reiserfs_write_unlock(inode->i_sb);
+       reiserfs_write_unlock_once(inode->i_sb, depth);
        return retval;
 }
index ed575fb4b49597806200f676680ff787d5be9d12..7e206fc1fa362ed4bb761a8f5ad11ef39c2dd5c0 100644 (file)
@@ -405,9 +405,15 @@ xlog_cil_push(
        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);
 
-       /* lock out transaction commit, but don't block on background push */
+       /*
+        * Lock out transaction commit, but don't block for background pushes
+        * unless we are well over the CIL space limit. See the definition of
+        * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
+        * used here.
+        */
        if (!down_write_trylock(&cil->xc_ctx_lock)) {
-               if (!push_seq)
+               if (!push_seq &&
+                   cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
                        goto out_free_ticket;
                down_write(&cil->xc_ctx_lock);
        }
@@ -422,7 +428,7 @@ xlog_cil_push(
                goto out_skip;
 
        /* check for a previously pushed sequence */
-       if (push_seq < cil->xc_ctx->sequence)
+       if (push_seq && push_seq < cil->xc_ctx->sequence)
                goto out_skip;
 
        /*
index ced52b98b322e3eb1be0e0c7dfc70f6096d0cd80..edcdfe01617f673bc1047caca3acc243f66a0658 100644 (file)
@@ -426,13 +426,13 @@ struct xfs_cil {
 };
 
 /*
- * The amount of log space we should the CIL to aggregate is difficult to size.
- * Whatever we chose we have to make we can get a reservation for the log space
- * effectively, that it is large enough to capture sufficient relogging to
- * reduce log buffer IO significantly, but it is not too large for the log or
- * induces too much latency when writing out through the iclogs. We track both
- * space consumed and the number of vectors in the checkpoint context, so we
- * need to decide which to use for limiting.
+ * The amount of log space we allow the CIL to aggregate is difficult to size.
+ * Whatever we choose, we have to make sure we can get a reservation for the
+ * log space effectively, that it is large enough to capture sufficient
+ * relogging to reduce log buffer IO significantly, but it is not too large for
+ * the log or induces too much latency when writing out through the iclogs. We
+ * track both space consumed and the number of vectors in the checkpoint
+ * context, so we need to decide which to use for limiting.
  *
  * Every log buffer we write out during a push needs a header reserved, which
  * is at least one sector and more for v2 logs. Hence we need a reservation of
@@ -459,16 +459,21 @@ struct xfs_cil {
  * checkpoint transaction ticket is specific to the checkpoint context, rather
  * than the CIL itself.
  *
- * With dynamic reservations, we can basically make up arbitrary limits for the
- * checkpoint size so long as they don't violate any other size rules.  Hence
- * the initial maximum size for the checkpoint transaction will be set to a
- * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit
- * right now based on the latency of writing out a large amount of data through
- * the circular iclog buffers.
+ * With dynamic reservations, we can effectively make up arbitrary limits for
+ * the checkpoint size so long as they don't violate any other size rules.
+ * Recovery imposes a rule that no transaction exceed half the log, so we are
+ * limited by that.  Furthermore, the log transaction reservation subsystem
+ * tries to keep 25% of the log free, so we need to keep below that limit or we
+ * risk running out of free log space to start any new transactions.
+ *
+ * In order to keep background CIL push efficient, we will set a lower
+ * threshold at which background pushing is attempted without blocking current
+ * transaction commits.  A separate, higher bound defines when CIL pushes are
+ * enforced to ensure we stay within our maximum checkpoint size bounds,
+ * yet give us plenty of space for aggregation on large logs.
  */
-
-#define XLOG_CIL_SPACE_LIMIT(log)      \
-       (min((log->l_logsize >> 2), (8 * 1024 * 1024)))
+#define XLOG_CIL_SPACE_LIMIT(log)      (log->l_logsize >> 3)
+#define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4))
 
 /*
  * The reservation head lsn is not made up of a cycle number and block number.
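For a concrete sense of the two limits defined above, a worked example with a purely hypothetical log size (the 128 MB figure is illustrative and not part of this merge):

/* Illustrative only: assume l_logsize = 128 MB.
 *   XLOG_CIL_SPACE_LIMIT(log)      = 128 MB >> 3       = 16 MB  (background push threshold)
 *   XLOG_CIL_HARD_SPACE_LIMIT(log) = 3 * (128 MB >> 4) = 24 MB  (hard/blocking threshold)
 * Both sit well below the "no transaction may exceed half the log" recovery
 * rule referred to in the comment above.
 */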
index c0786d446a00b88adf144ab97f05af06679cccf7..984cdc62e30bc52da4cef907f3dd5094aa5ffd71 100644 (file)
@@ -55,7 +55,7 @@
 extern u8 acpi_gbl_permanent_mmap;
 
 /*
- * Globals that are publically available, allowing for
+ * Globals that are publicly available, allowing for
  * run time configuration
  */
 extern u32 acpi_dbg_level;
index 62f59080e5cc215edb843f928763d9898055071f..04d0a977cd431fc5eb2c2f34cbd5390bfeb0db05 100644 (file)
@@ -3,13 +3,13 @@
 
 #include <linux/cache.h>
 #include <linux/threads.h>
-#include <linux/irq.h>
 
 typedef struct {
        unsigned int __softirq_pending;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#include <linux/irq.h>
 
 #ifndef ack_bad_irq
 static inline void ack_bad_irq(unsigned int irq)
index 8a92a170fb7dfd87710bb26b3c7f384dd6801363..ef2af9948eacc9b9f27a706b2febeeb0272b0910 100644 (file)
                                                                        \
        BUG_TABLE                                                       \
                                                                        \
+       JUMP_TABLE                                                      \
+                                                                       \
        /* PCI quirks */                                                \
        .pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {        \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .;           \
 #define BUG_TABLE
 #endif
 
+#define JUMP_TABLE                                                     \
+       . = ALIGN(8);                                                   \
+       __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {           \
+               VMLINUX_SYMBOL(__start___jump_table) = .;               \
+               *(__jump_table)                                         \
+               VMLINUX_SYMBOL(__stop___jump_table) = .;                \
+       }
+
 #ifdef CONFIG_PM_TRACE
 #define TRACEDATA                                                      \
        . = ALIGN(4);                                                   \
index 7809d230adee3f90c9537f6c00ec66bb53c1bd27..4c9461a4f9e67b4b3e67c5bb73192aed44695079 100644 (file)
@@ -612,7 +612,7 @@ struct drm_gem_object {
        struct kref refcount;
 
        /** Handle count of this object. Each handle also holds a reference */
-       struct kref handlecount;
+       atomic_t handle_count; /* number of handles on this object */
 
        /** Related drm device */
        struct drm_device *dev;
@@ -808,7 +808,6 @@ struct drm_driver {
         */
        int (*gem_init_object) (struct drm_gem_object *obj);
        void (*gem_free_object) (struct drm_gem_object *obj);
-       void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
 
        /* vga arb irq handler */
        void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1175,6 +1174,7 @@ extern int drm_release(struct inode *inode, struct file *filp);
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
 extern void drm_vm_open_locked(struct vm_area_struct *vma);
+extern void drm_vm_close_locked(struct vm_area_struct *vma);
 extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
 extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1455,12 +1455,11 @@ int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
-void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                            size_t size);
 int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct kref *kref);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -1483,8 +1482,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-       if (obj != NULL)
-               kref_put(&obj->refcount, drm_gem_object_free_unlocked);
+       if (obj != NULL) {
+               struct drm_device *dev = obj->dev;
+               mutex_lock(&dev->struct_mutex);
+               kref_put(&obj->refcount, drm_gem_object_free);
+               mutex_unlock(&dev->struct_mutex);
+       }
 }
 
 int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1495,7 +1498,7 @@ static inline void
 drm_gem_object_handle_reference(struct drm_gem_object *obj)
 {
        drm_gem_object_reference(obj);
-       kref_get(&obj->handlecount);
+       atomic_inc(&obj->handle_count);
 }
 
 static inline void
@@ -1504,12 +1507,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
        if (obj == NULL)
                return;
 
+       if (atomic_read(&obj->handle_count) == 0)
+               return;
        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */
-       kref_put(&obj->handlecount, drm_gem_object_handle_free);
+       if (atomic_dec_and_test(&obj->handle_count))
+               drm_gem_object_handle_free(obj);
        drm_gem_object_unreference(obj);
 }
 
@@ -1519,12 +1525,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
        if (obj == NULL)
                return;
 
+       if (atomic_read(&obj->handle_count) == 0)
+               return;
+
        /*
        * Must bump handle count first as this may be the last
        * ref, in which case the object would disappear before we
        * checked for a name
        */
-       kref_put(&obj->handlecount, drm_gem_object_handle_free);
+
+       if (atomic_dec_and_test(&obj->handle_count))
+               drm_gem_object_handle_free(obj);
        drm_gem_object_unreference_unlocked(obj);
 }
 
index 3a9940ef728bb5d2412c4cb54d84ed746e15d870..883c1d4398996d8ba807ca5d54940cec28c7809e 100644 (file)
@@ -85,7 +85,6 @@
        {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
-       {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
        {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
index 36ca9721a0c28a0150b05153b4f442d2c558d6d4..1be416bbbb82540802a0a742ba2c22934a9a6659 100644 (file)
@@ -53,6 +53,7 @@ struct cpuidle_state {
 #define CPUIDLE_FLAG_BALANCED  (0x40) /* medium latency, moderate savings */
 #define CPUIDLE_FLAG_DEEP      (0x80) /* high latency, large savings */
 #define CPUIDLE_FLAG_IGNORE    (0x100) /* ignore during this idle period */
+#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
index ce29b8151198b6c0d40b8f420efe9c2c920d9b4f..ba8319ae5fcc3e75edc0d52240da0b16e18da68d 100644 (file)
@@ -102,6 +102,9 @@ static inline u64 dma_get_mask(struct device *dev)
        return DMA_BIT_MASK(32);
 }
 
+#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+#else
 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
        if (!dma_supported(dev, mask))
@@ -109,6 +112,7 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
        dev->coherent_dma_mask = mask;
        return 0;
 }
+#endif
 
 extern u64 dma_get_required_mask(struct device *dev);
 
index c61d4ca27bcc26906699101b4f8eee7ce8e8525b..e2106495cc11383ad9a14d7ac119400b18a8f83a 100644 (file)
@@ -548,7 +548,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
        return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
 }
 
-static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
 {
        return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
 }
index 52c0da4bdd18fe5969fbf9c583652c0f7f00ba07..bef3cda44c4c4bb54570600a99fb0003db7038e7 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _DYNAMIC_DEBUG_H
 #define _DYNAMIC_DEBUG_H
 
+#include <linux/jump_label.h>
+
 /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
  * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
  * use independent hash functions, to reduce the chance of false positives.
@@ -22,8 +24,6 @@ struct _ddebug {
        const char *function;
        const char *filename;
        const char *format;
-       char primary_hash;
-       char secondary_hash;
        unsigned int lineno:24;
        /*
         * The flags field controls the behaviour at the callsite.
@@ -33,6 +33,7 @@ struct _ddebug {
 #define _DPRINTK_FLAGS_PRINT   (1<<0)  /* printk() a message using the format */
 #define _DPRINTK_FLAGS_DEFAULT 0
        unsigned int flags:8;
+       char enabled;
 } __attribute__((aligned(8)));
 
 
@@ -42,33 +43,35 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 #if defined(CONFIG_DYNAMIC_DEBUG)
 extern int ddebug_remove_module(const char *mod_name);
 
-#define __dynamic_dbg_enabled(dd)  ({       \
-       int __ret = 0;                                                       \
-       if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) &&        \
-                       (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2))))   \
-                               if (unlikely(dd.flags))                      \
-                                       __ret = 1;                           \
-       __ret; })
-
 #define dynamic_pr_debug(fmt, ...) do {                                        \
+       __label__ do_printk;                                            \
+       __label__ out;                                                  \
        static struct _ddebug descriptor                                \
        __used                                                          \
        __attribute__((section("__verbose"), aligned(8))) =             \
-       { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH,  \
-               DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT };        \
-       if (__dynamic_dbg_enabled(descriptor))                          \
-               printk(KERN_DEBUG pr_fmt(fmt),  ##__VA_ARGS__);         \
+       { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,            \
+               _DPRINTK_FLAGS_DEFAULT };                               \
+       JUMP_LABEL(&descriptor.enabled, do_printk);                     \
+       goto out;                                                       \
+do_printk:                                                             \
+       printk(KERN_DEBUG pr_fmt(fmt),  ##__VA_ARGS__);                 \
+out:   ;                                                               \
        } while (0)
 
 
 #define dynamic_dev_dbg(dev, fmt, ...) do {                            \
+       __label__ do_printk;                                            \
+       __label__ out;                                                  \
        static struct _ddebug descriptor                                \
        __used                                                          \
        __attribute__((section("__verbose"), aligned(8))) =             \
-       { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH,  \
-               DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT };        \
-       if (__dynamic_dbg_enabled(descriptor))                          \
-               dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);        \
+       { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,            \
+               _DPRINTK_FLAGS_DEFAULT };                               \
+       JUMP_LABEL(&descriptor.enabled, do_printk);                     \
+       goto out;                                                       \
+do_printk:                                                             \
+       dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);                \
+out:   ;                                                               \
        } while (0)
 
 #else
index 02b8b24f8f51f0e37156731ba94da19ba2d19131..8beabb958f61d5147c8893f1e780415a91fcb2e6 100644 (file)
@@ -191,8 +191,8 @@ struct ftrace_event_call {
        unsigned int            flags;
 
 #ifdef CONFIG_PERF_EVENTS
-       int                     perf_refcount;
-       struct hlist_head       *perf_events;
+       int                             perf_refcount;
+       struct hlist_head __percpu      *perf_events;
 #endif
 };
 
@@ -252,8 +252,8 @@ DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
 
 extern int  perf_trace_init(struct perf_event *event);
 extern void perf_trace_destroy(struct perf_event *event);
-extern int  perf_trace_enable(struct perf_event *event);
-extern void perf_trace_disable(struct perf_event *event);
+extern int  perf_trace_add(struct perf_event *event, int flags);
+extern void perf_trace_del(struct perf_event *event, int flags);
 extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
index a0384a4d1e6f4da4d39a02c0f8ba6634842dd236..531495db17081efabcb649a3740673f03a531e67 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
+#include <trace/events/irq.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -407,7 +408,12 @@ asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
+static inline void __raise_softirq_irqoff(unsigned int nr)
+{
+       trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
+       or_softirq_pending(1UL << nr);
+}
+
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 extern void wakeup_softirqd(void);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
new file mode 100644 (file)
index 0000000..b72cd9f
--- /dev/null
@@ -0,0 +1,64 @@
+#ifndef _LINUX_JUMP_LABEL_H
+#define _LINUX_JUMP_LABEL_H
+
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL)
+# include <asm/jump_label.h>
+# define HAVE_JUMP_LABEL
+#endif
+
+enum jump_label_type {
+       JUMP_LABEL_ENABLE,
+       JUMP_LABEL_DISABLE
+};
+
+struct module;
+
+#ifdef HAVE_JUMP_LABEL
+
+extern struct jump_entry __start___jump_table[];
+extern struct jump_entry __stop___jump_table[];
+
+extern void arch_jump_label_transform(struct jump_entry *entry,
+                                enum jump_label_type type);
+extern void arch_jump_label_text_poke_early(jump_label_t addr);
+extern void jump_label_update(unsigned long key, enum jump_label_type type);
+extern void jump_label_apply_nops(struct module *mod);
+extern int jump_label_text_reserved(void *start, void *end);
+
+#define enable_jump_label(key) \
+       jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
+
+#define disable_jump_label(key) \
+       jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
+
+#else
+
+#define JUMP_LABEL(key, label)                 \
+do {                                           \
+       if (unlikely(*key))                     \
+               goto label;                     \
+} while (0)
+
+#define enable_jump_label(cond_var)    \
+do {                                   \
+       *(cond_var) = 1;                        \
+} while (0)
+
+#define disable_jump_label(cond_var)   \
+do {                                   \
+       *(cond_var) = 0;                        \
+} while (0)
+
+static inline int jump_label_apply_nops(struct module *mod)
+{
+       return 0;
+}
+
+static inline int jump_label_text_reserved(void *start, void *end)
+{
+       return 0;
+}
+
+#endif
+
+#endif
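A minimal usage sketch for the fallback (!HAVE_JUMP_LABEL) path of the new header above; the key variable, function and message are hypothetical and only illustrate the macro contract, they are not part of this merge:

static char my_key;                       /* hypothetical jump-label key */

static void my_hot_path(void)
{
	JUMP_LABEL(&my_key, do_slow);     /* fallback expands to: if (unlikely(my_key)) goto do_slow; */
	return;                           /* common case: key disabled, fall straight through */
do_slow:
	printk(KERN_DEBUG "slow path taken\n");
}

/* Elsewhere (e.g. a debugfs write handler), the key would be flipped with:
 *	enable_jump_label(&my_key);
 *	disable_jump_label(&my_key);
 */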
index 8a6b9fdc7ffae0c20335b8a2c73726396a934ee2..b29e7458b96642b2f36162aa755b10ff76b15abe 100644 (file)
@@ -350,7 +350,10 @@ struct module
        struct tracepoint *tracepoints;
        unsigned int num_tracepoints;
 #endif
-
+#ifdef HAVE_JUMP_LABEL
+       struct jump_entry *jump_entries;
+       unsigned int num_jump_entries;
+#endif
 #ifdef CONFIG_TRACING
        const char **trace_bprintk_fmt_start;
        unsigned int num_trace_bprintk_fmt;
@@ -686,17 +689,16 @@ extern int module_sysfs_initialized;
 
 
 #ifdef CONFIG_GENERIC_BUG
-int  module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
                         struct module *);
 void module_bug_cleanup(struct module *);
 
 #else  /* !CONFIG_GENERIC_BUG */
 
-static inline int  module_bug_finalize(const Elf_Ehdr *hdr,
+static inline void module_bug_finalize(const Elf_Ehdr *hdr,
                                        const Elf_Shdr *sechdrs,
                                        struct module *mod)
 {
-       return 0;
 }
 static inline void module_bug_cleanup(struct module *mod) {}
 #endif /* CONFIG_GENERIC_BUG */
index 59d066936ab9e0414d2cddc5123a9b7e0ac3c3ad..123566912d7312f276bf975cd1a48c97b2ffa830 100644 (file)
@@ -27,8 +27,6 @@
 
 #define MAX_LINKS 32           
 
-struct net;
-
 struct sockaddr_nl {
        sa_family_t     nl_family;      /* AF_NETLINK   */
        unsigned short  nl_pad;         /* zero         */
@@ -151,6 +149,8 @@ struct nlattr {
 #include <linux/capability.h>
 #include <linux/skbuff.h>
 
+struct net;
+
 static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
 {
        return (struct nlmsghdr *)skb->data;
index 10d33309e9a61351aa4d3597540f201c97dc9294..570fddeb0388f62b7b8dd0a32ec5136123856033 100644 (file)
 #define PCI_DEVICE_ID_VLSI_82C147      0x0105
 #define PCI_DEVICE_ID_VLSI_VAS96011    0x0702
 
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU      0x5a23
+
 #define PCI_VENDOR_ID_ADL              0x1005
 #define PCI_DEVICE_ID_ADL_2301         0x2301
 
index 49466b13c5c6b310f36b31c052573864df6f02a0..0eb50832aa00fd1bbc31cc23837abfc698412402 100644 (file)
        preempt_enable();                               \
 } while (0)
 
+#define get_cpu_ptr(var) ({                            \
+       preempt_disable();                              \
+       this_cpu_ptr(var); })
+
+#define put_cpu_ptr(var) do {                          \
+       (void)(var);                                    \
+       preempt_enable();                               \
+} while (0)
+
 #ifdef CONFIG_SMP
 
 /* minimum unit size, also is the maximum supported allocation size */
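A short sketch of how the get_cpu_ptr()/put_cpu_ptr() helpers added above pair up; the per-cpu structure and counter are hypothetical:

struct my_stats {
	unsigned long events;
};
static DEFINE_PER_CPU(struct my_stats, my_stats);

static void my_count_event(void)
{
	struct my_stats *st = get_cpu_ptr(&my_stats);  /* disables preemption */

	st->events++;
	put_cpu_ptr(&my_stats);                        /* re-enables preemption */
}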
index 33f08dafda2fc231f13aa60f59574ba1b01eb250..a9227e9852074808f84cbb22cd93250d701c6bb4 100644 (file)
@@ -529,7 +529,6 @@ struct hw_perf_event {
                        int             last_cpu;
                };
                struct { /* software */
-                       s64             remaining;
                        struct hrtimer  hrtimer;
                };
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -539,6 +538,7 @@ struct hw_perf_event {
                };
 #endif
        };
+       int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
@@ -550,6 +550,13 @@ struct hw_perf_event {
 #endif
 };
 
+/*
+ * hw_perf_event::state flags
+ */
+#define PERF_HES_STOPPED       0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE      0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH          0x04
+
 struct perf_event;
 
 /*
@@ -561,36 +568,70 @@ struct perf_event;
  * struct pmu - generic performance monitoring unit
  */
 struct pmu {
-       int (*enable)                   (struct perf_event *event);
-       void (*disable)                 (struct perf_event *event);
-       int (*start)                    (struct perf_event *event);
-       void (*stop)                    (struct perf_event *event);
-       void (*read)                    (struct perf_event *event);
-       void (*unthrottle)              (struct perf_event *event);
+       struct list_head                entry;
+
+       int * __percpu                  pmu_disable_count;
+       struct perf_cpu_context * __percpu pmu_cpu_context;
+       int                             task_ctx_nr;
 
        /*
-        * Group events scheduling is treated as a transaction, add group
-        * events as a whole and perform one schedulability test. If the test
-        * fails, roll back the whole group
+        * Fully disable/enable this PMU, can be used to protect from the PMI
+        * as well as for lazy/batch writing of the MSRs.
         */
+       void (*pmu_enable)              (struct pmu *pmu); /* optional */
+       void (*pmu_disable)             (struct pmu *pmu); /* optional */
 
        /*
-        * Start the transaction, after this ->enable() doesn't need
-        * to do schedulability tests.
+        * Try and initialize the event for this PMU.
+        * Should return -ENOENT when the @event doesn't match this PMU.
         */
-       void (*start_txn)       (const struct pmu *pmu);
+       int (*event_init)               (struct perf_event *event);
+
+#define PERF_EF_START  0x01            /* start the counter when adding    */
+#define PERF_EF_RELOAD 0x02            /* reload the counter when starting */
+#define PERF_EF_UPDATE 0x04            /* update the counter when stopping */
+
        /*
-        * If ->start_txn() disabled the ->enable() schedulability test
+        * Adds/Removes a counter to/from the PMU, can be done inside
+        * a transaction, see the ->*_txn() methods.
+        */
+       int  (*add)                     (struct perf_event *event, int flags);
+       void (*del)                     (struct perf_event *event, int flags);
+
+       /*
+        * Starts/Stops a counter present on the PMU. The PMI handler
+        * should stop the counter when perf_event_overflow() returns
+        * !0. ->start() will be used to continue.
+        */
+       void (*start)                   (struct perf_event *event, int flags);
+       void (*stop)                    (struct perf_event *event, int flags);
+
+       /*
+        * Updates the counter value of the event.
+        */
+       void (*read)                    (struct perf_event *event);
+
+       /*
+        * Group events scheduling is treated as a transaction, add
+        * group events as a whole and perform one schedulability test.
+        * If the test fails, roll back the whole group
+        *
+        * Start the transaction, after this ->add() doesn't need to
+        * do schedulability tests.
+        */
+       void (*start_txn)       (struct pmu *pmu); /* optional */
+       /*
+        * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
-       int  (*commit_txn)      (const struct pmu *pmu);
+       int  (*commit_txn)      (struct pmu *pmu); /* optional */
        /*
-        * Will cancel the transaction, assumes ->disable() is called for
-        * each successfull ->enable() during the transaction.
+        * Will cancel the transaction, assumes ->del() is called
+        * for each successful ->add() during the transaction.
         */
-       void (*cancel_txn)      (const struct pmu *pmu);
+       void (*cancel_txn)      (struct pmu *pmu); /* optional */
 };
 
 /**
@@ -669,7 +710,7 @@ struct perf_event {
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
-       const struct pmu                *pmu;
+       struct pmu                      *pmu;
 
        enum perf_event_active_state    state;
        unsigned int                    attach_state;
@@ -763,12 +804,19 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
+enum perf_event_context_type {
+       task_context,
+       cpu_context,
+};
+
 /**
  * struct perf_event_context - event context structure
  *
  * Used as a container for task events and CPU events as well:
  */
 struct perf_event_context {
+       enum perf_event_context_type    type;
+       struct pmu                      *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
@@ -808,6 +856,12 @@ struct perf_event_context {
        struct rcu_head                 rcu_head;
 };
 
+/*
+ * Number of contexts where an event can trigger:
+ *     task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS       4
+
 /**
  * struct perf_event_cpu_context - per cpu event context structure
  */
@@ -815,18 +869,9 @@ struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
-       int                             max_pertask;
        int                             exclusive;
-       struct swevent_hlist            *swevent_hlist;
-       struct mutex                    hlist_mutex;
-       int                             hlist_refcount;
-
-       /*
-        * Recursion avoidance:
-        *
-        * task, softirq, irq, nmi context
-        */
-       int                             recursion[4];
+       struct list_head                rotation_list;
+       int                             jiffies_interval;
 };
 
 struct perf_output_handle {
@@ -842,28 +887,22 @@ struct perf_output_handle {
 
 #ifdef CONFIG_PERF_EVENTS
 
-/*
- * Set by architecture code:
- */
-extern int perf_max_events;
-
-extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+extern int perf_pmu_register(struct pmu *pmu);
+extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
 extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
+extern void perf_event_delayed_put(struct task_struct *task);
 extern void set_perf_event_pending(void);
 extern void perf_event_do_pending(void);
 extern void perf_event_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
+extern void perf_pmu_disable(struct pmu *pmu);
+extern void perf_pmu_enable(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern void perf_event_update_userpage(struct perf_event *event);
@@ -871,7 +910,7 @@ extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
-                               pid_t pid,
+                               struct task_struct *task,
                                perf_overflow_handler_t callback);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
@@ -922,14 +961,7 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
  */
 static inline int is_software_event(struct perf_event *event)
 {
-       switch (event->attr.type) {
-       case PERF_TYPE_SOFTWARE:
-       case PERF_TYPE_TRACEPOINT:
-       /* for now the breakpoint stuff also works as software event */
-       case PERF_TYPE_BREAKPOINT:
-               return 1;
-       }
-       return 0;
+       return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
 extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -978,7 +1010,21 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs);
+
+
+static inline void
+perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+       if (entry->nr < PERF_MAX_STACK_DEPTH)
+               entry->ip[entry->nr++] = ip;
+}
 
 extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
@@ -1021,21 +1067,19 @@ extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
+extern void perf_event_task_tick(void);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task)                     { }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
                            struct task_struct *next)                   { }
-static inline void
-perf_event_task_tick(struct task_struct *task)                         { }
 static inline int perf_event_init_task(struct task_struct *child)      { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
+static inline void perf_event_delayed_put(struct task_struct *task)    { }
 static inline void perf_event_do_pending(void)                         { }
 static inline void perf_event_print_debug(void)                                { }
-static inline void perf_disable(void)                                  { }
-static inline void perf_enable(void)                                   { }
 static inline int perf_event_task_disable(void)                                { return -EINVAL; }
 static inline int perf_event_task_enable(void)                         { return -EINVAL; }
 
@@ -1058,6 +1102,7 @@ static inline int  perf_swevent_get_recursion_context(void)               { return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)                { }
 static inline void perf_event_enable(struct perf_event *event)         { }
 static inline void perf_event_disable(struct perf_event *event)                { }
+static inline void perf_event_task_tick(void)                          { }
 #endif
 
 #define perf_output_put(handle, x) \
index 9fbc54a2585d42cb9276adf2c2d168f53e883f63..83af1f8d8b746cde3b8369d7125f3cdfe07da6d3 100644 (file)
@@ -454,7 +454,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * Makes rcu_dereference_check() do the dirty work.
  */
 #define rcu_dereference_bh(p) \
-               rcu_dereference_check(p, rcu_read_lock_bh_held())
+               rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled())
 
 /**
  * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
index 1e2a6db2d7dd03466bf850dc5011860c23e8f9c9..eb3c1ceec06e1f9f1f2833258c737008d8df79b9 100644 (file)
@@ -1160,6 +1160,13 @@ struct sched_rt_entity {
 
 struct rcu_node;
 
+enum perf_event_task_context {
+       perf_invalid_context = -1,
+       perf_hw_context = 0,
+       perf_sw_context,
+       perf_nr_task_contexts,
+};
+
 struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
@@ -1431,7 +1438,7 @@ struct task_struct {
        struct futex_pi_state *pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
-       struct perf_event_context *perf_event_ctxp;
+       struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
 #endif
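As a reading aid, a hedged sketch of indexing the new per-class context array; the
helper name is invented for illustration and is not part of this patch.

/* illustrative helper: pick a task's context by class */
static inline struct perf_event_context *
task_ctx_by_class(struct task_struct *task, enum perf_event_task_context ctxn)
{
        if (ctxn < 0 || ctxn >= perf_nr_task_contexts)
                return NULL;

        return task->perf_event_ctxp[ctxn]; /* e.g. perf_hw_context or perf_sw_context */
}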
index a2fada9becb60c47fcbf8143b3b3c67d291731b8..a8f56e1ec7602705d70ca326e620f1a8927e3925 100644 (file)
@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
                                          int offset, 
                                          unsigned int len, __wsum *csump);
 
-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
                             int offset, int len);
index 6b524a0d02e42b14419c5ae75133360a1f4874a5..1808960c50595908935455ca90a80fc02e421ef0 100644 (file)
@@ -126,8 +126,8 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 
 #else   /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
-static inline int stop_machine(int (*fn)(void *), void *data,
-                              const struct cpumask *cpus)
+static inline int __stop_machine(int (*fn)(void *), void *data,
+                                const struct cpumask *cpus)
 {
        int ret;
        local_irq_disable();
@@ -136,5 +136,11 @@ static inline int stop_machine(int (*fn)(void *), void *data,
        return ret;
 }
 
+static inline int stop_machine(int (*fn)(void *), void *data,
+                              const struct cpumask *cpus)
+{
+       return __stop_machine(fn, data, cpus);
+}
+
 #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 #endif /* _LINUX_STOP_MACHINE */
index 103d1b61aacba635bb7c81cef9e32b70fe220bb7..a4a90b6726ce6129b43174609fb3e35a2bd088ae 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
+#include <linux/jump_label.h>
 
 struct module;
 struct tracepoint;
@@ -145,7 +146,9 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
        extern struct tracepoint __tracepoint_##name;                   \
        static inline void trace_##name(proto)                          \
        {                                                               \
-               if (unlikely(__tracepoint_##name.state))                \
+               JUMP_LABEL(&__tracepoint_##name.state, do_trace);       \
+               return;                                                 \
+do_trace:                                                              \
                        __DO_TRACE(&__tracepoint_##name,                \
                                TP_PROTO(data_proto),                   \
                                TP_ARGS(data_args));                    \
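Roughly, the generated call site now looks like the sketch below for a hypothetical
tracepoint named "example" (simplified, not literal macro output): while the
tracepoint is disabled, the JUMP_LABEL() site is a patched-in no-op, so the common
path pays no conditional branch.

static inline void trace_example(int arg)                /* hypothetical tracepoint */
{
        JUMP_LABEL(&__tracepoint_example.state, do_trace); /* no-op until enabled */
        return;
do_trace:
        __DO_TRACE(&__tracepoint_example, TP_PROTO(int arg), TP_ARGS(arg));
}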
index 0836ccc5712146f87d13e9e35a9480e980708392..3efc9f3f43a0862cad51aa325e08c9f24d129f8e 100644 (file)
@@ -614,6 +614,7 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
                (wait)->private = current;                              \
                (wait)->func = autoremove_wake_function;                \
                INIT_LIST_HEAD(&(wait)->task_list);                     \
+               (wait)->flags = 0;                                      \
        } while (0)
 
 /**
index 45375b41a2a0d44e62a370813ab4fdcbe046e3b3..4d40c4d0230baeffbafcb57cb6fac99a1f0174b3 100644 (file)
@@ -121,6 +121,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
  *     IPv6 Address Label subsystem (addrlabel.c)
  */
 extern int                     ipv6_addr_label_init(void);
+extern void                    ipv6_addr_label_cleanup(void);
 extern void                    ipv6_addr_label_rtnl_register(void);
 extern u32                     ipv6_addr_label(struct net *net,
                                                const struct in6_addr *addr,
index 81d1413a87010967684febe7344169baa7d828ef..02386505033d975e666cd924a3bafe3ca02bb100 100644 (file)
@@ -242,6 +242,7 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
        skb->rxhash = 0;
+       skb_set_queue_mapping(skb, 0);
        skb_dst_drop(skb);
        nf_reset(skb);
 }
index bd732d62e1c3a3181c7c324d695b02e1548970e0..7e5e73bfa4dec8e2d45c834507f74d86484b8715 100644 (file)
@@ -199,6 +199,8 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol,
                fl.fl_ip_sport = sport;
                fl.fl_ip_dport = dport;
                fl.proto = protocol;
+               if (inet_sk(sk)->transparent)
+                       fl.flags |= FLOWI_FLAG_ANYSRC;
                ip_rt_put(*rp);
                *rp = NULL;
                security_sk_classify_flow(sk, &fl);
index fc8f36dd0f5c5145932c45aa8e9f441697dcb0ec..4f53532d4c2f0f6e20d200fabafe1ac51dd1632c 100644 (file)
@@ -298,8 +298,8 @@ struct xfrm_state_afinfo {
        const struct xfrm_type  *type_map[IPPROTO_MAX];
        struct xfrm_mode        *mode_map[XFRM_MODE_MAX];
        int                     (*init_flags)(struct xfrm_state *x);
-       void                    (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
-                                               struct xfrm_tmpl *tmpl,
+       void                    (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
+       void                    (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
                                                xfrm_address_t *daddr, xfrm_address_t *saddr);
        int                     (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
        int                     (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
index 0e4cfb694fe70630457af67e1b1bc568f56c9b09..6fa7cbab7d932c6649e9fbd4221615b6b4d0fd8d 100644 (file)
@@ -5,7 +5,9 @@
 #define _TRACE_IRQ_H
 
 #include <linux/tracepoint.h>
-#include <linux/interrupt.h>
+
+struct irqaction;
+struct softirq_action;
 
 #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
 #define show_softirq_name(val)                         \
@@ -93,7 +95,10 @@ DECLARE_EVENT_CLASS(softirq,
        ),
 
        TP_fast_assign(
-               __entry->vec = (int)(h - vec);
+               if (vec)
+                       __entry->vec = (int)(h - vec);
+               else
+                       __entry->vec = (int)(long)h;
        ),
 
        TP_printk("vec=%d [action=%s]", __entry->vec,
@@ -136,6 +141,23 @@ DEFINE_EVENT(softirq, softirq_exit,
        TP_ARGS(h, vec)
 );
 
+/**
+ * softirq_raise - called immediately when a softirq is raised
+ * @h: pointer to struct softirq_action
+ * @vec: pointer to first struct softirq_action in softirq_vec array
+ *
+ * The @h parameter carries the softirq vector number being raised rather
+ * than a softirq_action pointer, and @vec is NULL in that case. When used
+ * in combination with the softirq_entry tracepoint
+ * we can determine the softirq raise latency.
+ */
+DEFINE_EVENT(softirq, softirq_raise,
+
+       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+
+       TP_ARGS(h, vec)
+);
+
 #endif /*  _TRACE_IRQ_H */
 
 /* This part must be outside protection */
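A hedged sketch of a raise-side caller consistent with the comment above: the raiser
passes the vector number through @h and NULL for @vec, which is what the if (vec)
branch in TP_fast_assign() distinguishes. The surrounding function and the
or_softirq_pending() call are illustrative assumptions, not part of this hunk.

static inline void example_raise_softirq_irqoff(unsigned int nr)
{
        /* vector number carried via @h, @vec left NULL */
        trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
        or_softirq_pending(1UL << nr);
}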
index 188deca2f3c7721a1baac60cc07e8d7006442c71..8fe1e93f531dd81a8e549689a1b8e9b551231e9e 100644 (file)
@@ -6,10 +6,31 @@
 
 #include <linux/netdevice.h>
 #include <linux/tracepoint.h>
+#include <linux/ftrace.h>
+
+#define NO_DEV "(no_device)"
+
+TRACE_EVENT(napi_poll,
 
-DECLARE_TRACE(napi_poll,
        TP_PROTO(struct napi_struct *napi),
-       TP_ARGS(napi));
+
+       TP_ARGS(napi),
+
+       TP_STRUCT__entry(
+               __field(        struct napi_struct *,   napi)
+               __string(       dev_name, napi->dev ? napi->dev->name : NO_DEV)
+       ),
+
+       TP_fast_assign(
+               __entry->napi = napi;
+               __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+       ),
+
+       TP_printk("napi poll on napi struct %p for device %s",
+               __entry->napi, __get_str(dev_name))
+);
+
+#undef NO_DEV
 
 #endif /* _TRACE_NAPI_H_ */
 
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
new file mode 100644 (file)
index 0000000..5f247f5
--- /dev/null
@@ -0,0 +1,82 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM net
+
+#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NET_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(net_dev_xmit,
+
+       TP_PROTO(struct sk_buff *skb,
+                int rc),
+
+       TP_ARGS(skb, rc),
+
+       TP_STRUCT__entry(
+               __field(        void *,         skbaddr         )
+               __field(        unsigned int,   len             )
+               __field(        int,            rc              )
+               __string(       name,           skb->dev->name  )
+       ),
+
+       TP_fast_assign(
+               __entry->skbaddr = skb;
+               __entry->len = skb->len;
+               __entry->rc = rc;
+               __assign_str(name, skb->dev->name);
+       ),
+
+       TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
+               __get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
+);
+
+DECLARE_EVENT_CLASS(net_dev_template,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb),
+
+       TP_STRUCT__entry(
+               __field(        void *,         skbaddr         )
+               __field(        unsigned int,   len             )
+               __string(       name,           skb->dev->name  )
+       ),
+
+       TP_fast_assign(
+               __entry->skbaddr = skb;
+               __entry->len = skb->len;
+               __assign_str(name, skb->dev->name);
+       ),
+
+       TP_printk("dev=%s skbaddr=%p len=%u",
+               __get_str(name), __entry->skbaddr, __entry->len)
+)
+
+DEFINE_EVENT(net_dev_template, net_dev_queue,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_template, netif_receive_skb,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_template, netif_rx,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb)
+);
+#endif /* _TRACE_NET_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 35a2a6e7bf1e74992b8b83b242c4a507fed4246f..286784d69b8f480343244d8046327a5a7d9883d9 100644 (file)
 #ifndef _TRACE_POWER_ENUM_
 #define _TRACE_POWER_ENUM_
 enum {
-       POWER_NONE = 0,
-       POWER_CSTATE = 1,
-       POWER_PSTATE = 2,
+       POWER_NONE      = 0,
+       POWER_CSTATE    = 1,    /* C-State */
+       POWER_PSTATE    = 2,    /* Frequency change or DVFS */
+       POWER_SSTATE    = 3,    /* Suspend */
 };
 #endif
 
+/*
+ * The power events are used for cpuidle & suspend (power_start, power_end)
+ *  and for cpufreq (power_frequency)
+ */
 DECLARE_EVENT_CLASS(power,
 
        TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
@@ -70,6 +75,85 @@ TRACE_EVENT(power_end,
 
 );
 
+/*
+ * The clock events are used for clock enable/disable and for
+ *  clock rate change
+ */
+DECLARE_EVENT_CLASS(clock,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id),
+
+       TP_STRUCT__entry(
+               __string(       name,           name            )
+               __field(        u64,            state           )
+               __field(        u64,            cpu_id          )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, name);
+               __entry->state = state;
+               __entry->cpu_id = cpu_id;
+       ),
+
+       TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
+               (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_enable,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_disable,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_set_rate,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id)
+);
+
+/*
+ * The power domain events are used for power domain transitions
+ */
+DECLARE_EVENT_CLASS(power_domain,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id),
+
+       TP_STRUCT__entry(
+               __string(       name,           name            )
+               __field(        u64,            state           )
+               __field(        u64,            cpu_id          )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, name);
+               __entry->state = state;
+               __entry->cpu_id = cpu_id;
+       ),
+
+       TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
+               (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(power_domain, power_domain_target,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id)
+);
+
 #endif /* _TRACE_POWER_H */
 
 /* This part must be outside protection */
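For orientation, a hedged sketch of how a platform clock driver might emit the new
clock events; the driver type and its helpers are invented for illustration.

struct example_clk { const char *name; };                /* invented for the sketch */

static void example_clk_enable(struct example_clk *clk)
{
        trace_clock_enable(clk->name, 1, smp_processor_id());
        /* ... write the gate register here ... */
}

static void example_clk_set_rate(struct example_clk *clk, unsigned long rate)
{
        trace_clock_set_rate(clk->name, rate, smp_processor_id());
        /* ... program the divider here ... */
}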
index 4b2be6dc76f091647eb30f30a40a49218def8682..75ce9d500d8e3c62dbfffbc0acafca13d11d90aa 100644 (file)
@@ -35,6 +35,23 @@ TRACE_EVENT(kfree_skb,
                __entry->skbaddr, __entry->protocol, __entry->location)
 );
 
+TRACE_EVENT(consume_skb,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb),
+
+       TP_STRUCT__entry(
+               __field(        void *, skbaddr )
+       ),
+
+       TP_fast_assign(
+               __entry->skbaddr = skb;
+       ),
+
+       TP_printk("skbaddr=%p", __entry->skbaddr)
+);
+
 TRACE_EVENT(skb_copy_datagram_iovec,
 
        TP_PROTO(const struct sk_buff *skb, int len),
index 40a8f462a8224b298690cb07892f93afe8c15214..0e0d49bbb867f239be5690968227c53e7c0226c0 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -743,6 +743,8 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
            {
                struct semid_ds out;
 
+               memset(&out, 0, sizeof(out));
+
                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
 
                out.sem_otime   = in->sem_otime;
index 0b72d1a74be07c25b99a8da670ec4cfb0963cf77..d52b473c99a1765f54897cf212a91abee47f09a7 100644 (file)
@@ -10,7 +10,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
            kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
-           async.o range.o
+           async.o range.o jump_label.o
 obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
 obj-y += groups.o
 
index 03120229db2802929065a210930e41c7fa701ba0..e2bdf37f9fdea71a15acb3523ec20b63a6aa42d3 100644 (file)
@@ -149,9 +149,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 {
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
-#ifdef CONFIG_PERF_EVENTS
-       WARN_ON_ONCE(tsk->perf_event_ctxp);
-#endif
+       perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
 }
index b7e9d60a675d3a08a1096ce725380229217d9fa3..c445f8cc408d777dd7a94aec3fa78c07e1d98b98 100644 (file)
@@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
+               tmp->vm_mm = mm;
                if (anon_vma_fork(tmp, mpnt))
                        goto fail_nomem_anon_vma_fork;
                tmp->vm_flags &= ~VM_LOCKED;
-               tmp->vm_mm = mm;
                tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
index c7c2aed9e2dcc2e52669e7d3880be0666c496bff..3b714e839c1053ec102f99bf8d924424bdf09381 100644 (file)
@@ -433,8 +433,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
 {
-       return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
-                                               triggered);
+       return perf_event_create_kernel_counter(attr, -1, tsk, triggered);
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
@@ -516,7 +515,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
        get_online_cpus();
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
-               bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
+               bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered);
 
                *pevent = bp;
 
@@ -566,6 +565,61 @@ static struct notifier_block hw_breakpoint_exceptions_nb = {
        .priority = 0x7fffffff
 };
 
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+       release_bp_slot(event);
+}
+
+static int hw_breakpoint_event_init(struct perf_event *bp)
+{
+       int err;
+
+       if (bp->attr.type != PERF_TYPE_BREAKPOINT)
+               return -ENOENT;
+
+       err = register_perf_hw_breakpoint(bp);
+       if (err)
+               return err;
+
+       bp->destroy = bp_perf_event_destroy;
+
+       return 0;
+}
+
+static int hw_breakpoint_add(struct perf_event *bp, int flags)
+{
+       if (!(flags & PERF_EF_START))
+               bp->hw.state = PERF_HES_STOPPED;
+
+       return arch_install_hw_breakpoint(bp);
+}
+
+static void hw_breakpoint_del(struct perf_event *bp, int flags)
+{
+       arch_uninstall_hw_breakpoint(bp);
+}
+
+static void hw_breakpoint_start(struct perf_event *bp, int flags)
+{
+       bp->hw.state = 0;
+}
+
+static void hw_breakpoint_stop(struct perf_event *bp, int flags)
+{
+       bp->hw.state = PERF_HES_STOPPED;
+}
+
+static struct pmu perf_breakpoint = {
+       .task_ctx_nr    = perf_sw_context, /* could eventually get its own */
+
+       .event_init     = hw_breakpoint_event_init,
+       .add            = hw_breakpoint_add,
+       .del            = hw_breakpoint_del,
+       .start          = hw_breakpoint_start,
+       .stop           = hw_breakpoint_stop,
+       .read           = hw_breakpoint_pmu_read,
+};
+
 static int __init init_hw_breakpoint(void)
 {
        unsigned int **task_bp_pinned;
@@ -587,6 +641,8 @@ static int __init init_hw_breakpoint(void)
 
        constraints_initialized = 1;
 
+       perf_pmu_register(&perf_breakpoint);
+
        return register_die_notifier(&hw_breakpoint_exceptions_nb);
 
  err_alloc:
@@ -602,8 +658,3 @@ static int __init init_hw_breakpoint(void)
 core_initcall(init_hw_breakpoint);
 
 
-struct pmu perf_ops_bp = {
-       .enable         = arch_install_hw_breakpoint,
-       .disable        = arch_uninstall_hw_breakpoint,
-       .read           = hw_breakpoint_pmu_read,
-};
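The breakpoint PMU above shows the new registration model in full; as a further
hedged sketch (every name below is invented), a PMU now advertises a task-context
class, fills in event_init()/add()/del()/start()/stop()/read(), and returns -ENOENT
from event_init() for event types it does not own so the core can offer the event
to the next registered PMU.

static int example_pmu_event_init(struct perf_event *event)
{
        if (event->attr.type != PERF_TYPE_RAW)  /* assumption: a raw-counter PMU */
                return -ENOENT;                 /* let the core try another PMU  */
        return 0;
}

static int  example_pmu_add(struct perf_event *event, int flags)   { return 0; }
static void example_pmu_del(struct perf_event *event, int flags)   { }
static void example_pmu_start(struct perf_event *event, int flags) { }
static void example_pmu_stop(struct perf_event *event, int flags)  { }
static void example_pmu_read(struct perf_event *event)             { }

static struct pmu example_pmu = {
        .task_ctx_nr    = perf_hw_context,
        .event_init     = example_pmu_event_init,
        .add            = example_pmu_add,
        .del            = example_pmu_del,
        .start          = example_pmu_start,
        .stop           = example_pmu_stop,
        .read           = example_pmu_read,
};

/* registered once at init time, mirroring perf_pmu_register(&perf_breakpoint) above */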
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
new file mode 100644 (file)
index 0000000..7be868b
--- /dev/null
@@ -0,0 +1,429 @@
+/*
+ * jump label support
+ *
+ * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
+ *
+ */
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/err.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+#define JUMP_LABEL_HASH_BITS 6
+#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
+static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];
+
+/* mutex to protect coming/going of the jump_label table */
+static DEFINE_MUTEX(jump_label_mutex);
+
+struct jump_label_entry {
+       struct hlist_node hlist;
+       struct jump_entry *table;
+       int nr_entries;
+       /* hang modules off here */
+       struct hlist_head modules;
+       unsigned long key;
+};
+
+struct jump_label_module_entry {
+       struct hlist_node hlist;
+       struct jump_entry *table;
+       int nr_entries;
+       struct module *mod;
+};
+
+static int jump_label_cmp(const void *a, const void *b)
+{
+       const struct jump_entry *jea = a;
+       const struct jump_entry *jeb = b;
+
+       if (jea->key < jeb->key)
+               return -1;
+
+       if (jea->key > jeb->key)
+               return 1;
+
+       return 0;
+}
+
+static void
+sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
+{
+       unsigned long size;
+
+       size = (((unsigned long)stop - (unsigned long)start)
+                                       / sizeof(struct jump_entry));
+       sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
+}
+
+static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct jump_label_entry *e;
+       u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);
+
+       head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
+       hlist_for_each_entry(e, node, head, hlist) {
+               if (key == e->key)
+                       return e;
+       }
+       return NULL;
+}
+
+static struct jump_label_entry *
+add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table)
+{
+       struct hlist_head *head;
+       struct jump_label_entry *e;
+       u32 hash;
+
+       e = get_jump_label_entry(key);
+       if (e)
+               return ERR_PTR(-EEXIST);
+
+       e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
+
+       hash = jhash((void *)&key, sizeof(jump_label_t), 0);
+       head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
+       e->key = key;
+       e->table = table;
+       e->nr_entries = nr_entries;
+       INIT_HLIST_HEAD(&(e->modules));
+       hlist_add_head(&e->hlist, head);
+       return e;
+}
+
+static int
+build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
+{
+       struct jump_entry *iter, *iter_begin;
+       struct jump_label_entry *entry;
+       int count;
+
+       sort_jump_label_entries(start, stop);
+       iter = start;
+       while (iter < stop) {
+               entry = get_jump_label_entry(iter->key);
+               if (!entry) {
+                       iter_begin = iter;
+                       count = 0;
+                       while ((iter < stop) &&
+                               (iter->key == iter_begin->key)) {
+                               iter++;
+                               count++;
+                       }
+                       entry = add_jump_label_entry(iter_begin->key,
+                                                       count, iter_begin);
+                       if (IS_ERR(entry))
+                               return PTR_ERR(entry);
+                } else {
+                       WARN_ONCE(1, KERN_ERR "build_jump_label_hashtable: unexpected entry!\n");
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+/***
+ * jump_label_update - update jump label text
+ * @key -  key value associated with a jump label
+ * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
+ *
+ * Will enable/disable the jump for jump label @key, depending on the
+ * value of @type.
+ *
+ */
+
+void jump_label_update(unsigned long key, enum jump_label_type type)
+{
+       struct jump_entry *iter;
+       struct jump_label_entry *entry;
+       struct hlist_node *module_node;
+       struct jump_label_module_entry *e_module;
+       int count;
+
+       mutex_lock(&jump_label_mutex);
+       entry = get_jump_label_entry((jump_label_t)key);
+       if (entry) {
+               count = entry->nr_entries;
+               iter = entry->table;
+               while (count--) {
+                       if (kernel_text_address(iter->code))
+                               arch_jump_label_transform(iter, type);
+                       iter++;
+               }
+               /* enable/disable jump labels in modules */
+               hlist_for_each_entry(e_module, module_node, &(entry->modules),
+                                                       hlist) {
+                       count = e_module->nr_entries;
+                       iter = e_module->table;
+                       while (count--) {
+                               if (kernel_text_address(iter->code))
+                                       arch_jump_label_transform(iter, type);
+                               iter++;
+                       }
+               }
+       }
+       mutex_unlock(&jump_label_mutex);
+}
+
+static int addr_conflict(struct jump_entry *entry, void *start, void *end)
+{
+       if (entry->code <= (unsigned long)end &&
+               entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+               return 1;
+
+       return 0;
+}
+
+#ifdef CONFIG_MODULES
+
+static int module_conflict(void *start, void *end)
+{
+       struct hlist_head *head;
+       struct hlist_node *node, *node_next, *module_node, *module_node_next;
+       struct jump_label_entry *e;
+       struct jump_label_module_entry *e_module;
+       struct jump_entry *iter;
+       int i, count;
+       int conflict = 0;
+
+       for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
+               head = &jump_label_table[i];
+               hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
+                       hlist_for_each_entry_safe(e_module, module_node,
+                                                       module_node_next,
+                                                       &(e->modules), hlist) {
+                               count = e_module->nr_entries;
+                               iter = e_module->table;
+                               while (count--) {
+                                       if (addr_conflict(iter, start, end)) {
+                                               conflict = 1;
+                                               goto out;
+                                       }
+                                       iter++;
+                               }
+                       }
+               }
+       }
+out:
+       return conflict;
+}
+
+#endif
+
+/***
+ * jump_label_text_reserved - check if addr range is reserved
+ * @start: start text addr
+ * @end: end text addr
+ *
+ * checks if the text addr located between @start and @end
+ * overlaps with any of the jump label patch addresses. Code
+ * that wants to modify kernel text should first verify that
+ * it does not overlap with any of the jump label addresses.
+ *
+ * returns 1 if there is an overlap, 0 otherwise
+ */
+int jump_label_text_reserved(void *start, void *end)
+{
+       struct jump_entry *iter;
+       struct jump_entry *iter_start = __start___jump_table;
+       struct jump_entry *iter_stop = __stop___jump_table;
+       int conflict = 0;
+
+       mutex_lock(&jump_label_mutex);
+       iter = iter_start;
+       while (iter < iter_stop) {
+               if (addr_conflict(iter, start, end)) {
+                       conflict = 1;
+                       goto out;
+               }
+               iter++;
+       }
+
+       /* now check modules */
+#ifdef CONFIG_MODULES
+       conflict = module_conflict(start, end);
+#endif
+out:
+       mutex_unlock(&jump_label_mutex);
+       return conflict;
+}
+
+static __init int init_jump_label(void)
+{
+       int ret;
+       struct jump_entry *iter_start = __start___jump_table;
+       struct jump_entry *iter_stop = __stop___jump_table;
+       struct jump_entry *iter;
+
+       mutex_lock(&jump_label_mutex);
+       ret = build_jump_label_hashtable(__start___jump_table,
+                                        __stop___jump_table);
+       iter = iter_start;
+       while (iter < iter_stop) {
+               arch_jump_label_text_poke_early(iter->code);
+               iter++;
+       }
+       mutex_unlock(&jump_label_mutex);
+       return ret;
+}
+early_initcall(init_jump_label);
+
+#ifdef CONFIG_MODULES
+
+static struct jump_label_module_entry *
+add_jump_label_module_entry(struct jump_label_entry *entry,
+                           struct jump_entry *iter_begin,
+                           int count, struct module *mod)
+{
+       struct jump_label_module_entry *e;
+
+       e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
+       e->mod = mod;
+       e->nr_entries = count;
+       e->table = iter_begin;
+       hlist_add_head(&e->hlist, &entry->modules);
+       return e;
+}
+
+static int add_jump_label_module(struct module *mod)
+{
+       struct jump_entry *iter, *iter_begin;
+       struct jump_label_entry *entry;
+       struct jump_label_module_entry *module_entry;
+       int count;
+
+       /* if the module doesn't have jump label entries, just return */
+       if (!mod->num_jump_entries)
+               return 0;
+
+       sort_jump_label_entries(mod->jump_entries,
+                               mod->jump_entries + mod->num_jump_entries);
+       iter = mod->jump_entries;
+       while (iter < mod->jump_entries + mod->num_jump_entries) {
+               entry = get_jump_label_entry(iter->key);
+               iter_begin = iter;
+               count = 0;
+               while ((iter < mod->jump_entries + mod->num_jump_entries) &&
+                       (iter->key == iter_begin->key)) {
+                               iter++;
+                               count++;
+               }
+               if (!entry) {
+                       entry = add_jump_label_entry(iter_begin->key, 0, NULL);
+                       if (IS_ERR(entry))
+                               return PTR_ERR(entry);
+               }
+               module_entry = add_jump_label_module_entry(entry, iter_begin,
+                                                          count, mod);
+               if (IS_ERR(module_entry))
+                       return PTR_ERR(module_entry);
+       }
+       return 0;
+}
+
+static void remove_jump_label_module(struct module *mod)
+{
+       struct hlist_head *head;
+       struct hlist_node *node, *node_next, *module_node, *module_node_next;
+       struct jump_label_entry *e;
+       struct jump_label_module_entry *e_module;
+       int i;
+
+       /* if the module doesn't have jump label entries, just return */
+       if (!mod->num_jump_entries)
+               return;
+
+       for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
+               head = &jump_label_table[i];
+               hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
+                       hlist_for_each_entry_safe(e_module, module_node,
+                                                 module_node_next,
+                                                 &(e->modules), hlist) {
+                               if (e_module->mod == mod) {
+                                       hlist_del(&e_module->hlist);
+                                       kfree(e_module);
+                               }
+                       }
+                       if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
+                               hlist_del(&e->hlist);
+                               kfree(e);
+                       }
+               }
+       }
+}
+
+static int
+jump_label_module_notify(struct notifier_block *self, unsigned long val,
+                        void *data)
+{
+       struct module *mod = data;
+       int ret = 0;
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+               mutex_lock(&jump_label_mutex);
+               ret = add_jump_label_module(mod);
+               if (ret)
+                       remove_jump_label_module(mod);
+               mutex_unlock(&jump_label_mutex);
+               break;
+       case MODULE_STATE_GOING:
+               mutex_lock(&jump_label_mutex);
+               remove_jump_label_module(mod);
+               mutex_unlock(&jump_label_mutex);
+               break;
+       }
+       return ret;
+}
+
+/***
+ * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
+ * @mod: module to patch
+ *
+ * Allow for run-time selection of the optimal nops. Before the module
+ * loads, patch these call sites with arch_get_jump_label_nop(), which is
+ * specified by the arch-specific jump label code.
+ */
+void jump_label_apply_nops(struct module *mod)
+{
+       struct jump_entry *iter;
+
+       /* if the module doesn't have jump label entries, just return */
+       if (!mod->num_jump_entries)
+               return;
+
+       iter = mod->jump_entries;
+       while (iter < mod->jump_entries + mod->num_jump_entries) {
+               arch_jump_label_text_poke_early(iter->code);
+               iter++;
+       }
+}
+
+struct notifier_block jump_label_module_nb = {
+       .notifier_call = jump_label_module_notify,
+       .priority = 0,
+};
+
+static __init int init_jump_label_module(void)
+{
+       return register_module_notifier(&jump_label_module_nb);
+}
+early_initcall(init_jump_label_module);
+
+#endif /* CONFIG_MODULES */
+
+#endif
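A hedged sketch of a jump_label_update() caller: flipping a tracepoint's key patches
every JUMP_LABEL() site hashed under that key, in the core image and in loaded
modules. The wrapper below is invented for illustration; JUMP_LABEL_ENABLE and
JUMP_LABEL_DISABLE are the enum values named in the kernel-doc above.

static void example_set_tracepoint(struct tracepoint *tp, bool on)
{
        jump_label_update((unsigned long)&tp->state,
                          on ? JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
}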
index 6b5580c57644dc1804b02fd85648e7100d5d75dd..01a0700e873f53ca60084da3c0c1142bebf49b16 100644 (file)
@@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
        n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
        n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
 
-       if (n)
-               sg_mark_end(sgl + n - 1);
        return n;
 }
 
index 282035f3ae964e1e288f352c370be8edd11d3078..ec4210c6501e1b549d18c2a89fa3cbe8725da35b 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/memory.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/jump_label.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -399,7 +400,7 @@ static inline int kprobe_optready(struct kprobe *p)
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including addr (exclude breakpoint).
  */
-struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 {
        int i;
        struct kprobe *p = NULL;
@@ -831,6 +832,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 
 void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
                         struct hlist_head **head, unsigned long *flags)
+__acquires(hlist_lock)
 {
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;
@@ -842,6 +844,7 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
        unsigned long *flags)
+__acquires(hlist_lock)
 {
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_lock_irqsave(hlist_lock, *flags);
@@ -849,6 +852,7 @@ static void __kprobes kretprobe_table_lock(unsigned long hash,
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
        unsigned long *flags)
+__releases(hlist_lock)
 {
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;
@@ -857,7 +861,9 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
        spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
-void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
+static void __kprobes kretprobe_table_unlock(unsigned long hash,
+       unsigned long *flags)
+__releases(hlist_lock)
 {
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_unlock_irqrestore(hlist_lock, *flags);
@@ -1141,7 +1147,8 @@ int __kprobes register_kprobe(struct kprobe *p)
        preempt_disable();
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr) ||
-           ftrace_text_reserved(p->addr, p->addr)) {
+           ftrace_text_reserved(p->addr, p->addr) ||
+           jump_label_text_reserved(p->addr, p->addr)) {
                preempt_enable();
                return -EINVAL;
        }
@@ -1339,18 +1346,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
-               unsigned long addr;
+               unsigned long addr, offset;
                jp = jps[i];
                addr = arch_deref_entry_point(jp->entry);
 
-               if (!kernel_text_address(addr))
-                       ret = -EINVAL;
-               else {
-                       /* Todo: Verify probepoint is a function entry point */
+               /* Verify probepoint is a function entry point */
+               if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
+                   offset == 0) {
                        jp->kp.pre_handler = setjmp_pre_handler;
                        jp->kp.break_handler = longjmp_break_handler;
                        ret = register_kprobe(&jp->kp);
-               }
+               } else
+                       ret = -EINVAL;
+
                if (ret < 0) {
                        if (i > 0)
                                unregister_jprobes(jps, i);
index d0b5f8db11b4a4183c229e2a44f433a99f58090a..2df46301a7a407dcde3435542e38f6944358d7c1 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
+#include <linux/jump_label.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/module.h>
@@ -1537,6 +1538,7 @@ static int __unlink_module(void *_mod)
 {
        struct module *mod = _mod;
        list_del(&mod->list);
+       module_bug_cleanup(mod);
        return 0;
 }
 
@@ -2308,6 +2310,11 @@ static void find_module_sections(struct module *mod, struct load_info *info)
                                        sizeof(*mod->tracepoints),
                                        &mod->num_tracepoints);
 #endif
+#ifdef HAVE_JUMP_LABEL
+       mod->jump_entries = section_objs(info, "__jump_table",
+                                       sizeof(*mod->jump_entries),
+                                       &mod->num_jump_entries);
+#endif
 #ifdef CONFIG_EVENT_TRACING
        mod->trace_events = section_objs(info, "_ftrace_events",
                                         sizeof(*mod->trace_events),
@@ -2625,6 +2632,7 @@ static struct module *load_module(void __user *umod,
        if (err < 0)
                goto ddebug;
 
+       module_bug_finalize(info.hdr, info.sechdrs, mod);
        list_add_rcu(&mod->list, &modules);
        mutex_unlock(&module_mutex);
 
@@ -2650,6 +2658,8 @@ static struct module *load_module(void __user *umod,
        mutex_lock(&module_mutex);
        /* Unlink carefully: kallsyms could be walking list. */
        list_del_rcu(&mod->list);
+       module_bug_cleanup(mod);
+
  ddebug:
        if (!mod->taints)
                dynamic_debug_remove(info.debug);
index fc512684423f9636336f1ca2642c564c13e409e8..1ec3916ffef008521862e1240a93040f1fa7df72 100644 (file)
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
-#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
-/*
- * Each CPU has a list of per CPU events:
- */
-static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
-
-int perf_max_events __read_mostly = 1;
-static int perf_reserved_percpu __read_mostly;
-static int perf_overcommit __read_mostly = 1;
-
 static atomic_t nr_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
 
+static LIST_HEAD(pmus);
+static DEFINE_MUTEX(pmus_lock);
+static struct srcu_struct pmus_srcu;
+
 /*
  * perf event paranoia level:
  *  -1 - not paranoid at all
@@ -67,22 +61,6 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
-/*
- * Lock for (sysadmin-configurable) event reservations:
- */
-static DEFINE_SPINLOCK(perf_resource_lock);
-
-/*
- * Architecture provided APIs - weak aliases:
- */
-extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-       return NULL;
-}
-
-void __weak hw_perf_disable(void)              { barrier(); }
-void __weak hw_perf_enable(void)               { barrier(); }
-
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -90,18 +68,36 @@ extern __weak const char *perf_pmu_name(void)
        return "pmu";
 }
 
-static DEFINE_PER_CPU(int, perf_disable_count);
+void perf_pmu_disable(struct pmu *pmu)
+{
+       int *count = this_cpu_ptr(pmu->pmu_disable_count);
+       if (!(*count)++)
+               pmu->pmu_disable(pmu);
+}
 
-void perf_disable(void)
+void perf_pmu_enable(struct pmu *pmu)
 {
-       if (!__get_cpu_var(perf_disable_count)++)
-               hw_perf_disable();
+       int *count = this_cpu_ptr(pmu->pmu_disable_count);
+       if (!--(*count))
+               pmu->pmu_enable(pmu);
 }
 
-void perf_enable(void)
+static DEFINE_PER_CPU(struct list_head, rotation_list);
+
+/*
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
+ */
+static void perf_pmu_rotate_start(struct pmu *pmu)
 {
-       if (!--__get_cpu_var(perf_disable_count))
-               hw_perf_enable();
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+       struct list_head *head = &__get_cpu_var(rotation_list);
+
+       WARN_ON(!irqs_disabled());
+
+       if (list_empty(&cpuctx->rotation_list))
+               list_add(&cpuctx->rotation_list, head);
 }
 
 static void get_ctx(struct perf_event_context *ctx)
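Since perf_pmu_disable()/perf_pmu_enable() replace the old global
perf_disable()/perf_enable(), a hedged usage sketch (the surrounding function is
invented): the calls nest through the per-cpu pmu_disable_count, so only the
outermost pair actually invokes the PMU's pmu_disable()/pmu_enable() hooks.

static void example_reprogram(struct pmu *pmu)
{
        perf_pmu_disable(pmu);  /* count 0 -> 1: pmu->pmu_disable() runs    */
        perf_pmu_disable(pmu);  /* count 1 -> 2: nested, hardware untouched */

        /* ... reschedule or reprogram events while the PMU is quiesced ... */

        perf_pmu_enable(pmu);   /* count 2 -> 1: still disabled             */
        perf_pmu_enable(pmu);   /* count 1 -> 0: pmu->pmu_enable() runs     */
}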
@@ -156,13 +152,13 @@ static u64 primary_event_id(struct perf_event *event)
  * the context could get moved to another task.
  */
 static struct perf_event_context *
-perf_lock_task_context(struct task_struct *task, unsigned long *flags)
+perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
 {
        struct perf_event_context *ctx;
 
        rcu_read_lock();
- retry:
-       ctx = rcu_dereference(task->perf_event_ctxp);
+retry:
+       ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
        if (ctx) {
                /*
                 * If this context is a clone of another, it might
@@ -175,7 +171,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
                 * can't get swapped on us any more.
                 */
                raw_spin_lock_irqsave(&ctx->lock, *flags);
-               if (ctx != rcu_dereference(task->perf_event_ctxp)) {
+               if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
                        goto retry;
                }
@@ -194,12 +190,13 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
  * can't get swapped to another task.  This also increments its
  * reference count so that the context can't get freed.
  */
-static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
+static struct perf_event_context *
+perf_pin_task_context(struct task_struct *task, int ctxn)
 {
        struct perf_event_context *ctx;
        unsigned long flags;
 
-       ctx = perf_lock_task_context(task, &flags);
+       ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                ++ctx->pin_count;
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
@@ -307,6 +304,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
        }
 
        list_add_rcu(&event->event_entry, &ctx->event_list);
+       if (!ctx->nr_events)
+               perf_pmu_rotate_start(ctx->pmu);
        ctx->nr_events++;
        if (event->attr.inherit_stat)
                ctx->nr_stat++;
@@ -441,7 +440,7 @@ event_sched_out(struct perf_event *event,
                event->state = PERF_EVENT_STATE_OFF;
        }
        event->tstamp_stopped = ctx->time;
-       event->pmu->disable(event);
+       event->pmu->del(event, 0);
        event->oncpu = -1;
 
        if (!is_software_event(event))
@@ -471,6 +470,12 @@ group_sched_out(struct perf_event *group_event,
                cpuctx->exclusive = 0;
 }
 
+static inline struct perf_cpu_context *
+__get_cpu_context(struct perf_event_context *ctx)
+{
+       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+}
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -479,9 +484,9 @@ group_sched_out(struct perf_event *group_event,
  */
 static void __perf_event_remove_from_context(void *info)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        /*
         * If this is a task context, we need to check whether it is
@@ -492,27 +497,11 @@ static void __perf_event_remove_from_context(void *info)
                return;
 
        raw_spin_lock(&ctx->lock);
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level.
-        */
-       perf_disable();
 
        event_sched_out(event, cpuctx, ctx);
 
        list_del_event(event, ctx);
 
-       if (!ctx->task) {
-               /*
-                * Allow more per task events with respect to the
-                * reservation:
-                */
-               cpuctx->max_pertask =
-                       min(perf_max_events - ctx->nr_events,
-                           perf_max_events - perf_reserved_percpu);
-       }
-
-       perf_enable();
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -577,8 +566,8 @@ retry:
 static void __perf_event_disable(void *info)
 {
        struct perf_event *event = info;
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        /*
         * If this is a per-task event, need to check whether this
@@ -633,7 +622,7 @@ void perf_event_disable(struct perf_event *event)
                return;
        }
 
- retry:
+retry:
        task_oncpu_function_call(task, __perf_event_disable, event);
 
        raw_spin_lock_irq(&ctx->lock);
@@ -672,7 +661,7 @@ event_sched_in(struct perf_event *event,
         */
        smp_wmb();
 
-       if (event->pmu->enable(event)) {
+       if (event->pmu->add(event, PERF_EF_START)) {
                event->state = PERF_EVENT_STATE_INACTIVE;
                event->oncpu = -1;
                return -EAGAIN;
@@ -696,22 +685,15 @@ group_sched_in(struct perf_event *group_event,
               struct perf_event_context *ctx)
 {
        struct perf_event *event, *partial_group = NULL;
-       const struct pmu *pmu = group_event->pmu;
-       bool txn = false;
+       struct pmu *pmu = group_event->pmu;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
 
-       /* Check if group transaction availabe */
-       if (pmu->start_txn)
-               txn = true;
-
-       if (txn)
-               pmu->start_txn(pmu);
+       pmu->start_txn(pmu);
 
        if (event_sched_in(group_event, cpuctx, ctx)) {
-               if (txn)
-                       pmu->cancel_txn(pmu);
+               pmu->cancel_txn(pmu);
                return -EAGAIN;
        }
 
@@ -725,7 +707,7 @@ group_sched_in(struct perf_event *group_event,
                }
        }
 
-       if (!txn || !pmu->commit_txn(pmu))
+       if (!pmu->commit_txn(pmu))
                return 0;
 
 group_error:
@@ -740,8 +722,7 @@ group_error:
        }
        event_sched_out(group_event, cpuctx, ctx);
 
-       if (txn)
-               pmu->cancel_txn(pmu);
+       pmu->cancel_txn(pmu);
 
        return -EAGAIN;
 }
@@ -794,10 +775,10 @@ static void add_event_to_ctx(struct perf_event *event,
  */
 static void __perf_install_in_context(void *info)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;
 
        /*
@@ -817,12 +798,6 @@ static void __perf_install_in_context(void *info)
        ctx->is_active = 1;
        update_context_time(ctx);
 
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level. NOP for non NMI based events.
-        */
-       perf_disable();
-
        add_event_to_ctx(event, ctx);
 
        if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -860,12 +835,7 @@ static void __perf_install_in_context(void *info)
                }
        }
 
-       if (!err && !ctx->task && cpuctx->max_pertask)
-               cpuctx->max_pertask--;
-
- unlock:
-       perf_enable();
-
+unlock:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -888,6 +858,8 @@ perf_install_in_context(struct perf_event_context *ctx,
 {
        struct task_struct *task = ctx->task;
 
+       event->ctx = ctx;
+
        if (!task) {
                /*
                 * Per cpu events are installed via an smp call and
@@ -936,10 +908,12 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 
        event->state = PERF_EVENT_STATE_INACTIVE;
        event->tstamp_enabled = ctx->time - event->total_time_enabled;
-       list_for_each_entry(sub, &event->sibling_list, group_entry)
-               if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+       list_for_each_entry(sub, &event->sibling_list, group_entry) {
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
                        sub->tstamp_enabled =
                                ctx->time - sub->total_time_enabled;
+               }
+       }
 }
 
 /*
@@ -948,9 +922,9 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 static void __perf_event_enable(void *info)
 {
        struct perf_event *event = info;
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;
 
        /*
@@ -984,12 +958,10 @@ static void __perf_event_enable(void *info)
        if (!group_can_go_on(event, cpuctx, 1)) {
                err = -EEXIST;
        } else {
-               perf_disable();
                if (event == leader)
                        err = group_sched_in(event, cpuctx, ctx);
                else
                        err = event_sched_in(event, cpuctx, ctx);
-               perf_enable();
        }
 
        if (err) {
@@ -1005,7 +977,7 @@ static void __perf_event_enable(void *info)
                }
        }
 
- unlock:
+unlock:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1046,7 +1018,7 @@ void perf_event_enable(struct perf_event *event)
        if (event->state == PERF_EVENT_STATE_ERROR)
                event->state = PERF_EVENT_STATE_OFF;
 
- retry:
+retry:
        raw_spin_unlock_irq(&ctx->lock);
        task_oncpu_function_call(task, __perf_event_enable, event);
 
@@ -1066,7 +1038,7 @@ void perf_event_enable(struct perf_event *event)
        if (event->state == PERF_EVENT_STATE_OFF)
                __perf_event_mark_enabled(event, ctx);
 
- out:
+out:
        raw_spin_unlock_irq(&ctx->lock);
 }
 
@@ -1097,26 +1069,26 @@ static void ctx_sched_out(struct perf_event_context *ctx,
        struct perf_event *event;
 
        raw_spin_lock(&ctx->lock);
+       perf_pmu_disable(ctx->pmu);
        ctx->is_active = 0;
        if (likely(!ctx->nr_events))
                goto out;
        update_context_time(ctx);
 
-       perf_disable();
        if (!ctx->nr_active)
-               goto out_enable;
+               goto out;
 
-       if (event_type & EVENT_PINNED)
+       if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
+       }
 
-       if (event_type & EVENT_FLEXIBLE)
+       if (event_type & EVENT_FLEXIBLE) {
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
-
- out_enable:
-       perf_enable();
- out:
+       }
+out:
+       perf_pmu_enable(ctx->pmu);
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1214,34 +1186,25 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
        }
 }
 
-/*
- * Called from scheduler to remove the events of the current task,
- * with interrupts disabled.
- *
- * We stop each event and update the event value in event->count.
- *
- * This does not protect us against NMI, but disable()
- * sets the disabled bit in the control field of event _before_
- * accessing the event control register. If a NMI hits, then it will
- * not restart the event.
- */
-void perf_event_task_sched_out(struct task_struct *task,
-                                struct task_struct *next)
+void perf_event_context_sched_out(struct task_struct *task, int ctxn,
+                                 struct task_struct *next)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
        struct perf_event_context *next_ctx;
        struct perf_event_context *parent;
+       struct perf_cpu_context *cpuctx;
        int do_switch = 1;
 
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+       if (likely(!ctx))
+               return;
 
-       if (likely(!ctx || !cpuctx->task_ctx))
+       cpuctx = __get_cpu_context(ctx);
+       if (!cpuctx->task_ctx)
                return;
 
        rcu_read_lock();
        parent = rcu_dereference(ctx->parent_ctx);
-       next_ctx = next->perf_event_ctxp;
+       next_ctx = next->perf_event_ctxp[ctxn];
        if (parent && next_ctx &&
            rcu_dereference(next_ctx->parent_ctx) == parent) {
                /*
@@ -1260,8 +1223,8 @@ void perf_event_task_sched_out(struct task_struct *task,
                         * XXX do we need a memory barrier of sorts
                         * wrt to rcu_dereference() of perf_event_ctxp
                         */
-                       task->perf_event_ctxp = next_ctx;
-                       next->perf_event_ctxp = ctx;
+                       task->perf_event_ctxp[ctxn] = next_ctx;
+                       next->perf_event_ctxp[ctxn] = ctx;
                        ctx->task = next;
                        next_ctx->task = task;
                        do_switch = 0;
@@ -1279,10 +1242,35 @@ void perf_event_task_sched_out(struct task_struct *task,
        }
 }
 
+#define for_each_task_context_nr(ctxn)                                 \
+       for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
+
+/*
+ * Called from scheduler to remove the events of the current task,
+ * with interrupts disabled.
+ *
+ * We stop each event and update the event value in event->count.
+ *
+ * This does not protect us against NMI, but disable()
+ * sets the disabled bit in the control field of event _before_
+ * accessing the event control register. If a NMI hits, then it will
+ * not restart the event.
+ */
+void perf_event_task_sched_out(struct task_struct *task,
+                              struct task_struct *next)
+{
+       int ctxn;
+
+       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+       for_each_task_context_nr(ctxn)
+               perf_event_context_sched_out(task, ctxn, next);
+}
+
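The hunk above replaces the single task->perf_event_ctxp pointer with an array indexed by context class, and the sched-out hook now just walks that array. A minimal user-space sketch of the same iteration pattern; all names below (NR_TASK_CONTEXTS, struct task, ctx_sched_out) are illustrative stand-ins, not kernel symbols:

#include <stdio.h>

#define NR_TASK_CONTEXTS 2	/* stand-in for perf_nr_task_contexts */

struct ctx { const char *name; };
struct task { struct ctx *ctxp[NR_TASK_CONTEXTS]; };

static void ctx_sched_out(struct ctx *c) { printf("sched out %s\n", c->name); }

static void task_sched_out(struct task *t)
{
	int n;

	for (n = 0; n < NR_TASK_CONTEXTS; n++) {
		if (!t->ctxp[n])	/* no context of this class for the task */
			continue;
		ctx_sched_out(t->ctxp[n]);
	}
}

int main(void)
{
	struct ctx hw = { "hw" };
	struct task t = { { &hw, NULL } };

	task_sched_out(&t);
	return 0;
}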
 static void task_ctx_sched_out(struct perf_event_context *ctx,
                               enum event_type_t event_type)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        if (!cpuctx->task_ctx)
                return;
@@ -1355,9 +1343,10 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
                if (event->cpu != -1 && event->cpu != smp_processor_id())
                        continue;
 
-               if (group_can_go_on(event, cpuctx, can_add_hw))
+               if (group_can_go_on(event, cpuctx, can_add_hw)) {
                        if (group_sched_in(event, cpuctx, ctx))
                                can_add_hw = 0;
+               }
        }
 }
 
@@ -1373,8 +1362,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 
        ctx->timestamp = perf_clock();
 
-       perf_disable();
-
        /*
         * First go through the list and put on any pinned groups
         * in order to give them the best chance of going on.
@@ -1386,8 +1373,7 @@ ctx_sched_in(struct perf_event_context *ctx,
        if (event_type & EVENT_FLEXIBLE)
                ctx_flexible_sched_in(ctx, cpuctx);
 
-       perf_enable();
- out:
+out:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1399,43 +1385,28 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
        ctx_sched_in(ctx, cpuctx, event_type);
 }
 
-static void task_ctx_sched_in(struct task_struct *task,
+static void task_ctx_sched_in(struct perf_event_context *ctx,
                              enum event_type_t event_type)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_cpu_context *cpuctx;
 
-       if (likely(!ctx))
-               return;
+       cpuctx = __get_cpu_context(ctx);
        if (cpuctx->task_ctx == ctx)
                return;
+
        ctx_sched_in(ctx, cpuctx, event_type);
        cpuctx->task_ctx = ctx;
 }
-/*
- * Called from scheduler to add the events of the current task
- * with interrupts disabled.
- *
- * We restore the event value and then enable it.
- *
- * This does not protect us against NMI, but enable()
- * sets the enabled bit in the control field of event _before_
- * accessing the event control register. If a NMI hits, then it will
- * keep the event running.
- */
-void perf_event_task_sched_in(struct task_struct *task)
-{
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
 
-       if (likely(!ctx))
-               return;
+void perf_event_context_sched_in(struct perf_event_context *ctx)
+{
+       struct perf_cpu_context *cpuctx;
 
+       cpuctx = __get_cpu_context(ctx);
        if (cpuctx->task_ctx == ctx)
                return;
 
-       perf_disable();
-
+       perf_pmu_disable(ctx->pmu);
        /*
         * We want to keep the following priority order:
         * cpu pinned (that don't need to move), task pinned,
@@ -1449,7 +1420,37 @@ void perf_event_task_sched_in(struct task_struct *task)
 
        cpuctx->task_ctx = ctx;
 
-       perf_enable();
+       /*
+        * Since these rotations are per-cpu, we need to ensure the
+        * cpu-context we got scheduled on is actually rotating.
+        */
+       perf_pmu_rotate_start(ctx->pmu);
+       perf_pmu_enable(ctx->pmu);
+}
+
+/*
+ * Called from scheduler to add the events of the current task
+ * with interrupts disabled.
+ *
+ * We restore the event value and then enable it.
+ *
+ * This does not protect us against NMI, but enable()
+ * sets the enabled bit in the control field of event _before_
+ * accessing the event control register. If a NMI hits, then it will
+ * keep the event running.
+ */
+void perf_event_task_sched_in(struct task_struct *task)
+{
+       struct perf_event_context *ctx;
+       int ctxn;
+
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (likely(!ctx))
+                       continue;
+
+               perf_event_context_sched_in(ctx);
+       }
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1529,22 +1530,6 @@ do {                                     \
        return div64_u64(dividend, divisor);
 }
 
-static void perf_event_stop(struct perf_event *event)
-{
-       if (!event->pmu->stop)
-               return event->pmu->disable(event);
-
-       return event->pmu->stop(event);
-}
-
-static int perf_event_start(struct perf_event *event)
-{
-       if (!event->pmu->start)
-               return event->pmu->enable(event);
-
-       return event->pmu->start(event);
-}
-
 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 {
        struct hw_perf_event *hwc = &event->hw;
@@ -1564,15 +1549,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
        hwc->sample_period = sample_period;
 
        if (local64_read(&hwc->period_left) > 8*sample_period) {
-               perf_disable();
-               perf_event_stop(event);
+               event->pmu->stop(event, PERF_EF_UPDATE);
                local64_set(&hwc->period_left, 0);
-               perf_event_start(event);
-               perf_enable();
+               event->pmu->start(event, PERF_EF_RELOAD);
        }
 }
 
-static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
+static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 {
        struct perf_event *event;
        struct hw_perf_event *hwc;
@@ -1597,23 +1580,19 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                 */
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(event, 1);
-                       perf_disable();
-                       event->pmu->unthrottle(event);
-                       perf_enable();
+                       event->pmu->start(event, 0);
                }
 
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
 
-               perf_disable();
                event->pmu->read(event);
                now = local64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
                hwc->freq_count_stamp = now;
 
                if (delta > 0)
-                       perf_adjust_period(event, TICK_NSEC, delta);
-               perf_enable();
+                       perf_adjust_period(event, period, delta);
        }
        raw_spin_unlock(&ctx->lock);
 }
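As the hunk above shows, the stop/start pair with PERF_EF_UPDATE/PERF_EF_RELOAD now replaces the global perf_disable()/perf_enable() bracket when a frequency-driven event has accumulated far more remaining period than its new sample period. A small self-contained sketch of just that reprogramming decision; the stop/start side effects are reduced to comments and the values are made up:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the visible rule: only reprogram when the leftover period is
 * more than 8x the freshly computed sample period. */
static void reprogram(uint64_t *period_left, uint64_t sample_period)
{
	if (*period_left > 8 * sample_period) {
		/* stop(PERF_EF_UPDATE) would drain the hardware counter here */
		*period_left = 0;
		/* start(PERF_EF_RELOAD) would reprogram it with the new period */
	}
}

int main(void)
{
	uint64_t left = 1000000, period = 10000;

	reprogram(&left, period);
	printf("period_left after adjustment: %llu\n",
	       (unsigned long long)left);
	return 0;
}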
@@ -1631,32 +1610,38 @@ static void rotate_ctx(struct perf_event_context *ctx)
        raw_spin_unlock(&ctx->lock);
 }
 
-void perf_event_task_tick(struct task_struct *curr)
+/*
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
+ */
+static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
-       int rotate = 0;
-
-       if (!atomic_read(&nr_events))
-               return;
+       u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
+       struct perf_event_context *ctx = NULL;
+       int rotate = 0, remove = 1;
 
-       cpuctx = &__get_cpu_var(perf_cpu_context);
-       if (cpuctx->ctx.nr_events &&
-           cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
-               rotate = 1;
+       if (cpuctx->ctx.nr_events) {
+               remove = 0;
+               if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+                       rotate = 1;
+       }
 
-       ctx = curr->perf_event_ctxp;
-       if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
-               rotate = 1;
+       ctx = cpuctx->task_ctx;
+       if (ctx && ctx->nr_events) {
+               remove = 0;
+               if (ctx->nr_events != ctx->nr_active)
+                       rotate = 1;
+       }
 
-       perf_ctx_adjust_freq(&cpuctx->ctx);
+       perf_pmu_disable(cpuctx->ctx.pmu);
+       perf_ctx_adjust_freq(&cpuctx->ctx, interval);
        if (ctx)
-               perf_ctx_adjust_freq(ctx);
+               perf_ctx_adjust_freq(ctx, interval);
 
        if (!rotate)
-               return;
+               goto done;
 
-       perf_disable();
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1667,8 +1652,27 @@ void perf_event_task_tick(struct task_struct *curr)
 
        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
-               task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-       perf_enable();
+               task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+
+done:
+       if (remove)
+               list_del_init(&cpuctx->rotation_list);
+
+       perf_pmu_enable(cpuctx->ctx.pmu);
+}
+
+void perf_event_task_tick(void)
+{
+       struct list_head *head = &__get_cpu_var(rotation_list);
+       struct perf_cpu_context *cpuctx, *tmp;
+
+       WARN_ON(!irqs_disabled());
+
+       list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+               if (cpuctx->jiffies_interval == 1 ||
+                               !(jiffies % cpuctx->jiffies_interval))
+                       perf_rotate_context(cpuctx);
+       }
 }
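perf_event_task_tick() above only rotates a cpu-context on ticks that match its jiffies_interval. A toy user-space model of that gate, with jiffies and the interval as plain integers:

#include <stdio.h>

static int should_rotate(unsigned long jiffies, int jiffies_interval)
{
	return jiffies_interval == 1 || !(jiffies % jiffies_interval);
}

int main(void)
{
	unsigned long j;

	for (j = 0; j < 8; j++)
		printf("tick %lu: interval 4 -> %s\n", j,
		       should_rotate(j, 4) ? "rotate" : "skip");
	return 0;
}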
 
 static int event_enable_on_exec(struct perf_event *event,
@@ -1690,20 +1694,18 @@ static int event_enable_on_exec(struct perf_event *event,
  * Enable all of a task's events that have been marked enable-on-exec.
  * This expects task == current.
  */
-static void perf_event_enable_on_exec(struct task_struct *task)
+static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 {
-       struct perf_event_context *ctx;
        struct perf_event *event;
        unsigned long flags;
        int enabled = 0;
        int ret;
 
        local_irq_save(flags);
-       ctx = task->perf_event_ctxp;
        if (!ctx || !ctx->nr_events)
                goto out;
 
-       __perf_event_task_sched_out(ctx);
+       task_ctx_sched_out(ctx, EVENT_ALL);
 
        raw_spin_lock(&ctx->lock);
 
@@ -1727,8 +1729,8 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
        raw_spin_unlock(&ctx->lock);
 
-       perf_event_task_sched_in(task);
- out:
+       perf_event_context_sched_in(ctx);
+out:
        local_irq_restore(flags);
 }
 
@@ -1737,9 +1739,9 @@ static void perf_event_enable_on_exec(struct task_struct *task)
  */
 static void __perf_event_read(void *info)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        /*
         * If this is a task context, we need to check whether it is
@@ -1787,110 +1789,355 @@ static u64 perf_event_read(struct perf_event *event)
 }
 
 /*
- * Initialize the perf_event context in a task_struct:
+ * Callchain support
  */
-static void
-__perf_event_init_context(struct perf_event_context *ctx,
-                           struct task_struct *task)
+
+struct callchain_cpus_entries {
+       struct rcu_head                 rcu_head;
+       struct perf_callchain_entry     *cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs)
 {
-       raw_spin_lock_init(&ctx->lock);
-       mutex_init(&ctx->mutex);
-       INIT_LIST_HEAD(&ctx->pinned_groups);
-       INIT_LIST_HEAD(&ctx->flexible_groups);
-       INIT_LIST_HEAD(&ctx->event_list);
-       atomic_set(&ctx->refcount, 1);
-       ctx->task = task;
 }
 
-static struct perf_event_context *find_get_context(pid_t pid, int cpu)
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs)
 {
-       struct perf_event_context *ctx;
-       struct perf_cpu_context *cpuctx;
-       struct task_struct *task;
-       unsigned long flags;
-       int err;
+}
 
-       if (pid == -1 && cpu != -1) {
-               /* Must be root to operate on a CPU event: */
-               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-                       return ERR_PTR(-EACCES);
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+       struct callchain_cpus_entries *entries;
+       int cpu;
 
-               if (cpu < 0 || cpu >= nr_cpumask_bits)
-                       return ERR_PTR(-EINVAL);
+       entries = container_of(head, struct callchain_cpus_entries, rcu_head);
 
-               /*
-                * We could be clever and allow to attach a event to an
-                * offline CPU and activate it when the CPU comes up, but
-                * that's for later.
-                */
-               if (!cpu_online(cpu))
-                       return ERR_PTR(-ENODEV);
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
 
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               ctx = &cpuctx->ctx;
-               get_ctx(ctx);
+       kfree(entries);
+}
 
-               return ctx;
-       }
+static void release_callchain_buffers(void)
+{
+       struct callchain_cpus_entries *entries;
 
-       rcu_read_lock();
-       if (!pid)
-               task = current;
-       else
-               task = find_task_by_vpid(pid);
-       if (task)
-               get_task_struct(task);
-       rcu_read_unlock();
+       entries = callchain_cpus_entries;
+       rcu_assign_pointer(callchain_cpus_entries, NULL);
+       call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
 
-       if (!task)
-               return ERR_PTR(-ESRCH);
+static int alloc_callchain_buffers(void)
+{
+       int cpu;
+       int size;
+       struct callchain_cpus_entries *entries;
 
        /*
-        * Can't attach events to a dying task.
+        * We can't use the percpu allocation API for data that can be
+        * accessed from NMI. Use a temporary manual per cpu allocation
+        * until that gets sorted out.
         */
-       err = -ESRCH;
-       if (task->flags & PF_EXITING)
-               goto errout;
+       size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
+               num_possible_cpus();
 
-       /* Reuse ptrace permission checks for now. */
-       err = -EACCES;
-       if (!ptrace_may_access(task, PTRACE_MODE_READ))
-               goto errout;
+       entries = kzalloc(size, GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
 
- retry:
-       ctx = perf_lock_task_context(task, &flags);
-       if (ctx) {
-               unclone_ctx(ctx);
-               raw_spin_unlock_irqrestore(&ctx->lock, flags);
-       }
+       size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
 
-       if (!ctx) {
-               ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
-               err = -ENOMEM;
-               if (!ctx)
-                       goto errout;
-               __perf_event_init_context(ctx, task);
-               get_ctx(ctx);
-               if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
-                       /*
-                        * We raced with some other task; use
-                        * the context they set.
-                        */
-                       kfree(ctx);
-                       goto retry;
-               }
-               get_task_struct(task);
+       for_each_possible_cpu(cpu) {
+               entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+                                                        cpu_to_node(cpu));
+               if (!entries->cpu_entries[cpu])
+                       goto fail;
        }
 
-       put_task_struct(task);
-       return ctx;
-
- errout:
-       put_task_struct(task);
-       return ERR_PTR(err);
-}
+       rcu_assign_pointer(callchain_cpus_entries, entries);
 
-static void perf_event_free_filter(struct perf_event *event);
+       return 0;
+
+fail:
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+       kfree(entries);
+
+       return -ENOMEM;
+}
+
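The allocation above is two-level: one header with a pointer per possible CPU, each pointing at PERF_NR_CONTEXTS callchain entries, because the per-cpu allocator cannot be used for NMI-accessed data. A user-space sketch of the same shape, with a hypothetical NR_CONTEXTS and struct entry standing in for the kernel types:

#include <stdlib.h>

#define NR_CONTEXTS 4	/* stand-in for PERF_NR_CONTEXTS */

struct entry { unsigned long ip[128]; unsigned int nr; };

struct cpu_entries {
	int ncpu;
	struct entry *cpu[];	/* one pointer per possible CPU */
};

static struct cpu_entries *alloc_entries(int ncpu)
{
	struct cpu_entries *e;
	int i;

	e = calloc(1, sizeof(*e) + ncpu * sizeof(struct entry *));
	if (!e)
		return NULL;
	e->ncpu = ncpu;

	for (i = 0; i < ncpu; i++) {
		e->cpu[i] = malloc(NR_CONTEXTS * sizeof(struct entry));
		if (!e->cpu[i])
			goto fail;
	}
	return e;

fail:
	while (i--)
		free(e->cpu[i]);
	free(e);
	return NULL;
}

int main(void)
{
	struct cpu_entries *e = alloc_entries(4);

	if (e) {
		for (int i = 0; i < e->ncpu; i++)
			free(e->cpu[i]);
		free(e);
	}
	return 0;
}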
+static int get_callchain_buffers(void)
+{
+       int err = 0;
+       int count;
+
+       mutex_lock(&callchain_mutex);
+
+       count = atomic_inc_return(&nr_callchain_events);
+       if (WARN_ON_ONCE(count < 1)) {
+               err = -EINVAL;
+               goto exit;
+       }
+
+       if (count > 1) {
+               /* If the allocation failed, give up */
+               if (!callchain_cpus_entries)
+                       err = -ENOMEM;
+               goto exit;
+       }
+
+       err = alloc_callchain_buffers();
+       if (err)
+               release_callchain_buffers();
+exit:
+       mutex_unlock(&callchain_mutex);
+
+       return err;
+}
+
+static void put_callchain_buffers(void)
+{
+       if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+               release_callchain_buffers();
+               mutex_unlock(&callchain_mutex);
+       }
+}
+
+static int get_recursion_context(int *recursion)
+{
+       int rctx;
+
+       if (in_nmi())
+               rctx = 3;
+       else if (in_irq())
+               rctx = 2;
+       else if (in_softirq())
+               rctx = 1;
+       else
+               rctx = 0;
+
+       if (recursion[rctx])
+               return -1;
+
+       recursion[rctx]++;
+       barrier();
+
+       return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+       barrier();
+       recursion[rctx]--;
+}
+
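get_recursion_context()/put_recursion_context() above keep one counter per execution level (task, softirq, irq, NMI) and refuse a second entry at the same level. A stand-alone model of that guard, with the level passed in explicitly instead of being derived from in_nmi()/in_irq()/in_softirq():

#include <stdio.h>

enum { CTX_TASK, CTX_SOFTIRQ, CTX_IRQ, CTX_NMI, NR_CTX };

static int recursion[NR_CTX];

static int get_ctx(int rctx)
{
	if (recursion[rctx])
		return -1;	/* already inside this level: refuse */
	recursion[rctx]++;
	return rctx;
}

static void put_ctx(int rctx)
{
	recursion[rctx]--;
}

int main(void)
{
	int a = get_ctx(CTX_IRQ);
	int b = get_ctx(CTX_IRQ);	/* nested attempt at the same level: rejected */

	printf("first=%d nested=%d\n", a, b);
	if (a >= 0)
		put_ctx(a);
	return 0;
}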
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+       int cpu;
+       struct callchain_cpus_entries *entries;
+
+       *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+       if (*rctx == -1)
+               return NULL;
+
+       entries = rcu_dereference(callchain_cpus_entries);
+       if (!entries)
+               return NULL;
+
+       cpu = smp_processor_id();
+
+       return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+       put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+       int rctx;
+       struct perf_callchain_entry *entry;
+
+
+       entry = get_callchain_entry(&rctx);
+       if (rctx == -1)
+               return NULL;
+
+       if (!entry)
+               goto exit_put;
+
+       entry->nr = 0;
+
+       if (!user_mode(regs)) {
+               perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+               perf_callchain_kernel(entry, regs);
+               if (current->mm)
+                       regs = task_pt_regs(current);
+               else
+                       regs = NULL;
+       }
+
+       if (regs) {
+               perf_callchain_store(entry, PERF_CONTEXT_USER);
+               perf_callchain_user(entry, regs);
+       }
+
+exit_put:
+       put_callchain_entry(rctx);
+
+       return entry;
+}
+
+/*
+ * Initialize the perf_event context in a task_struct:
+ */
+static void __perf_event_init_context(struct perf_event_context *ctx)
+{
+       raw_spin_lock_init(&ctx->lock);
+       mutex_init(&ctx->mutex);
+       INIT_LIST_HEAD(&ctx->pinned_groups);
+       INIT_LIST_HEAD(&ctx->flexible_groups);
+       INIT_LIST_HEAD(&ctx->event_list);
+       atomic_set(&ctx->refcount, 1);
+}
+
+static struct perf_event_context *
+alloc_perf_context(struct pmu *pmu, struct task_struct *task)
+{
+       struct perf_event_context *ctx;
+
+       ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
+
+       __perf_event_init_context(ctx);
+       if (task) {
+               ctx->task = task;
+               get_task_struct(task);
+       }
+       ctx->pmu = pmu;
+
+       return ctx;
+}
+
+static struct task_struct *
+find_lively_task_by_vpid(pid_t vpid)
+{
+       struct task_struct *task;
+       int err;
+
+       rcu_read_lock();
+       if (!vpid)
+               task = current;
+       else
+               task = find_task_by_vpid(vpid);
+       if (task)
+               get_task_struct(task);
+       rcu_read_unlock();
+
+       if (!task)
+               return ERR_PTR(-ESRCH);
+
+       /*
+        * Can't attach events to a dying task.
+        */
+       err = -ESRCH;
+       if (task->flags & PF_EXITING)
+               goto errout;
+
+       /* Reuse ptrace permission checks for now. */
+       err = -EACCES;
+       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+               goto errout;
+
+       return task;
+errout:
+       put_task_struct(task);
+       return ERR_PTR(err);
+
+}
+
+static struct perf_event_context *
+find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
+{
+       struct perf_event_context *ctx;
+       struct perf_cpu_context *cpuctx;
+       unsigned long flags;
+       int ctxn, err;
+
+       if (!task && cpu != -1) {
+               /* Must be root to operate on a CPU event: */
+               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+                       return ERR_PTR(-EACCES);
+
+               if (cpu < 0 || cpu >= nr_cpumask_bits)
+                       return ERR_PTR(-EINVAL);
+
+               /*
+                * We could be clever and allow attaching an event to an
+                * offline CPU and activate it when the CPU comes up, but
+                * that's for later.
+                */
+               if (!cpu_online(cpu))
+                       return ERR_PTR(-ENODEV);
+
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+               ctx = &cpuctx->ctx;
+               get_ctx(ctx);
+
+               return ctx;
+       }
+
+       err = -EINVAL;
+       ctxn = pmu->task_ctx_nr;
+       if (ctxn < 0)
+               goto errout;
+
+retry:
+       ctx = perf_lock_task_context(task, ctxn, &flags);
+       if (ctx) {
+               unclone_ctx(ctx);
+               raw_spin_unlock_irqrestore(&ctx->lock, flags);
+       }
+
+       if (!ctx) {
+               ctx = alloc_perf_context(pmu, task);
+               err = -ENOMEM;
+               if (!ctx)
+                       goto errout;
+
+               get_ctx(ctx);
+
+               if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
+                       /*
+                        * We raced with some other task; use
+                        * the context they set.
+                        */
+                       put_task_struct(task);
+                       kfree(ctx);
+                       goto retry;
+               }
+       }
+
+       put_task_struct(task);
+       return ctx;
+
+errout:
+       put_task_struct(task);
+       return ERR_PTR(err);
+}
+
+static void perf_event_free_filter(struct perf_event *event);
 
 static void free_event_rcu(struct rcu_head *head)
 {
@@ -1918,6 +2165,8 @@ static void free_event(struct perf_event *event)
                        atomic_dec(&nr_comm_events);
                if (event->attr.task)
                        atomic_dec(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
        }
 
        if (event->buffer) {
@@ -1928,7 +2177,9 @@ static void free_event(struct perf_event *event)
        if (event->destroy)
                event->destroy(event);
 
-       put_ctx(event->ctx);
+       if (event->ctx)
+               put_ctx(event->ctx);
+
        call_rcu(&event->rcu_head, free_event_rcu);
 }
 
@@ -2349,6 +2600,9 @@ int perf_event_task_disable(void)
 
 static int perf_event_index(struct perf_event *event)
 {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
+
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return 0;
 
@@ -2960,16 +3214,6 @@ void perf_event_do_pending(void)
        __perf_pending_run();
 }
 
-/*
- * Callchain support -- arch specific
- */
-
-__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       return NULL;
-}
-
-
 /*
  * We assume there is only KVM supporting the callbacks.
  * Later on, we might change it to a list if there is
@@ -3076,7 +3320,7 @@ again:
        if (handle->wakeup != local_read(&buffer->wakeup))
                perf_output_wakeup(handle);
 
- out:
+out:
        preempt_enable();
 }
 
@@ -3464,14 +3708,20 @@ static void perf_event_output(struct perf_event *event, int nmi,
        struct perf_output_handle handle;
        struct perf_event_header header;
 
+       /* protect the callchain buffers */
+       rcu_read_lock();
+
        perf_prepare_sample(&header, data, event, regs);
 
        if (perf_output_begin(&handle, event, header.size, nmi, 1))
-               return;
+               goto exit;
 
        perf_output_sample(&handle, &header, data, event);
 
        perf_output_end(&handle);
+
+exit:
+       rcu_read_unlock();
 }
 
 /*
@@ -3585,16 +3835,27 @@ static void perf_event_task_ctx(struct perf_event_context *ctx,
 static void perf_event_task_event(struct perf_task_event *task_event)
 {
        struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx = task_event->task_ctx;
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int ctxn;
 
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_task_ctx(&cpuctx->ctx, task_event);
-       if (!ctx)
-               ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_task_ctx(ctx, task_event);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_task_ctx(&cpuctx->ctx, task_event);
+
+               ctx = task_event->task_ctx;
+               if (!ctx) {
+                       ctxn = pmu->task_ctx_nr;
+                       if (ctxn < 0)
+                               goto next;
+                       ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               }
+               if (ctx)
+                       perf_event_task_ctx(ctx, task_event);
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
 }
 
@@ -3699,8 +3960,10 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 {
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
-       unsigned int size;
        char comm[TASK_COMM_LEN];
+       unsigned int size;
+       struct pmu *pmu;
+       int ctxn;
 
        memset(comm, 0, sizeof(comm));
        strlcpy(comm, comm_event->task->comm, sizeof(comm));
@@ -3712,21 +3975,36 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_comm_ctx(&cpuctx->ctx, comm_event);
-       ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_comm_ctx(ctx, comm_event);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx)
+                       perf_event_comm_ctx(ctx, comm_event);
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
 }
 
 void perf_event_comm(struct task_struct *task)
 {
        struct perf_comm_event comm_event;
+       struct perf_event_context *ctx;
+       int ctxn;
+
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (!ctx)
+                       continue;
 
-       if (task->perf_event_ctxp)
-               perf_event_enable_on_exec(task);
+               perf_event_enable_on_exec(ctx);
+       }
 
        if (!atomic_read(&nr_comm_events))
                return;
@@ -3828,6 +4106,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
        char tmp[16];
        char *buf = NULL;
        const char *name;
+       struct pmu *pmu;
+       int ctxn;
 
        memset(tmp, 0, sizeof(tmp));
 
@@ -3880,12 +4160,23 @@ got_name:
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
-       ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
+                                       vma->vm_flags & VM_EXEC);
+
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx) {
+                       perf_event_mmap_ctx(ctx, mmap_event,
+                                       vma->vm_flags & VM_EXEC);
+               }
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
 
        kfree(buf);
@@ -3967,8 +4258,6 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
        struct hw_perf_event *hwc = &event->hw;
        int ret = 0;
 
-       throttle = (throttle && event->pmu->unthrottle != NULL);
-
        if (!throttle) {
                hwc->interrupts++;
        } else {
@@ -4036,6 +4325,17 @@ int perf_event_overflow(struct perf_event *event, int nmi,
  * Generic software event infrastructure
  */
 
+struct swevent_htable {
+       struct swevent_hlist            *swevent_hlist;
+       struct mutex                    hlist_mutex;
+       int                             hlist_refcount;
+
+       /* Recursion avoidance in each contexts */
+       int                             recursion[PERF_NR_CONTEXTS];
+};
+
+static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+
 /*
  * We directly increment event->count and keep a second value in
  * event->hw.period_left to count intervals. This period event
@@ -4093,7 +4393,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
        }
 }
 
-static void perf_swevent_add(struct perf_event *event, u64 nr,
+static void perf_swevent_event(struct perf_event *event, u64 nr,
                               int nmi, struct perf_sample_data *data,
                               struct pt_regs *regs)
 {
@@ -4119,6 +4419,9 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 static int perf_exclude_event(struct perf_event *event,
                              struct pt_regs *regs)
 {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
+
        if (regs) {
                if (event->attr.exclude_user && user_mode(regs))
                        return 1;
@@ -4165,11 +4468,11 @@ __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
 
 /* For the read side: events when they trigger */
 static inline struct hlist_head *
-find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
 {
        struct swevent_hlist *hlist;
 
-       hlist = rcu_dereference(ctx->swevent_hlist);
+       hlist = rcu_dereference(swhash->swevent_hlist);
        if (!hlist)
                return NULL;
 
@@ -4178,7 +4481,7 @@ find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
 
 /* For the event head insertion and removal in the hlist */
 static inline struct hlist_head *
-find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
+find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
 {
        struct swevent_hlist *hlist;
        u32 event_id = event->attr.config;
@@ -4189,7 +4492,7 @@ find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
         * and release. Which makes the protected version suitable here.
         * The context lock guarantees that.
         */
-       hlist = rcu_dereference_protected(ctx->swevent_hlist,
+       hlist = rcu_dereference_protected(swhash->swevent_hlist,
                                          lockdep_is_held(&event->ctx->lock));
        if (!hlist)
                return NULL;
@@ -4202,23 +4505,19 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
 {
-       struct perf_cpu_context *cpuctx;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
        struct perf_event *event;
        struct hlist_node *node;
        struct hlist_head *head;
 
-       cpuctx = &__get_cpu_var(perf_cpu_context);
-
        rcu_read_lock();
-
-       head = find_swevent_head_rcu(cpuctx, type, event_id);
-
+       head = find_swevent_head_rcu(swhash, type, event_id);
        if (!head)
                goto end;
 
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_swevent_match(event, type, event_id, data, regs))
-                       perf_swevent_add(event, nr, nmi, data, regs);
+                       perf_swevent_event(event, nr, nmi, data, regs);
        }
 end:
        rcu_read_unlock();
@@ -4226,33 +4525,17 @@ end:
 
 int perf_swevent_get_recursion_context(void)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       int rctx;
-
-       if (in_nmi())
-               rctx = 3;
-       else if (in_irq())
-               rctx = 2;
-       else if (in_softirq())
-               rctx = 1;
-       else
-               rctx = 0;
-
-       if (cpuctx->recursion[rctx])
-               return -1;
-
-       cpuctx->recursion[rctx]++;
-       barrier();
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 
-       return rctx;
+       return get_recursion_context(swhash->recursion);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
 void inline perf_swevent_put_recursion_context(int rctx)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       barrier();
-       cpuctx->recursion[rctx]--;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+
+       put_recursion_context(swhash->recursion, rctx);
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@ -4278,20 +4561,20 @@ static void perf_swevent_read(struct perf_event *event)
 {
 }
 
-static int perf_swevent_enable(struct perf_event *event)
+static int perf_swevent_add(struct perf_event *event, int flags)
 {
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
        struct hw_perf_event *hwc = &event->hw;
-       struct perf_cpu_context *cpuctx;
        struct hlist_head *head;
 
-       cpuctx = &__get_cpu_var(perf_cpu_context);
-
        if (hwc->sample_period) {
                hwc->last_period = hwc->sample_period;
                perf_swevent_set_period(event);
        }
 
-       head = find_swevent_head(cpuctx, event);
+       hwc->state = !(flags & PERF_EF_START);
+
+       head = find_swevent_head(swhash, event);
        if (WARN_ON_ONCE(!head))
                return -EINVAL;
 
@@ -4300,202 +4583,27 @@ static int perf_swevent_enable(struct perf_event *event)
        return 0;
 }
 
-static void perf_swevent_disable(struct perf_event *event)
+static void perf_swevent_del(struct perf_event *event, int flags)
 {
        hlist_del_rcu(&event->hlist_entry);
 }
 
-static void perf_swevent_void(struct perf_event *event)
-{
-}
-
-static int perf_swevent_int(struct perf_event *event)
-{
-       return 0;
-}
-
-static const struct pmu perf_ops_generic = {
-       .enable         = perf_swevent_enable,
-       .disable        = perf_swevent_disable,
-       .start          = perf_swevent_int,
-       .stop           = perf_swevent_void,
-       .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void, /* hwc->interrupts already reset */
-};
-
-/*
- * hrtimer based swevent callback
- */
-
-static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
-{
-       enum hrtimer_restart ret = HRTIMER_RESTART;
-       struct perf_sample_data data;
-       struct pt_regs *regs;
-       struct perf_event *event;
-       u64 period;
-
-       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
-       event->pmu->read(event);
-
-       perf_sample_data_init(&data, 0);
-       data.period = event->hw.last_period;
-       regs = get_irq_regs();
-
-       if (regs && !perf_exclude_event(event, regs)) {
-               if (!(event->attr.exclude_idle && current->pid == 0))
-                       if (perf_event_overflow(event, 0, &data, regs))
-                               ret = HRTIMER_NORESTART;
-       }
-
-       period = max_t(u64, 10000, event->hw.sample_period);
-       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
-
-       return ret;
-}
-
-static void perf_swevent_start_hrtimer(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-
-       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       hwc->hrtimer.function = perf_swevent_hrtimer;
-       if (hwc->sample_period) {
-               u64 period;
-
-               if (hwc->remaining) {
-                       if (hwc->remaining < 0)
-                               period = 10000;
-                       else
-                               period = hwc->remaining;
-                       hwc->remaining = 0;
-               } else {
-                       period = max_t(u64, 10000, hwc->sample_period);
-               }
-               __hrtimer_start_range_ns(&hwc->hrtimer,
-                               ns_to_ktime(period), 0,
-                               HRTIMER_MODE_REL, 0);
-       }
-}
-
-static void perf_swevent_cancel_hrtimer(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-
-       if (hwc->sample_period) {
-               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
-               hwc->remaining = ktime_to_ns(remaining);
-
-               hrtimer_cancel(&hwc->hrtimer);
-       }
-}
-
-/*
- * Software event: cpu wall time clock
- */
-
-static void cpu_clock_perf_event_update(struct perf_event *event)
+static void perf_swevent_start(struct perf_event *event, int flags)
 {
-       int cpu = raw_smp_processor_id();
-       s64 prev;
-       u64 now;
-
-       now = cpu_clock(cpu);
-       prev = local64_xchg(&event->hw.prev_count, now);
-       local64_add(now - prev, &event->count);
+       event->hw.state = 0;
 }
 
-static int cpu_clock_perf_event_enable(struct perf_event *event)
+static void perf_swevent_stop(struct perf_event *event, int flags)
 {
-       struct hw_perf_event *hwc = &event->hw;
-       int cpu = raw_smp_processor_id();
-
-       local64_set(&hwc->prev_count, cpu_clock(cpu));
-       perf_swevent_start_hrtimer(event);
-
-       return 0;
-}
-
-static void cpu_clock_perf_event_disable(struct perf_event *event)
-{
-       perf_swevent_cancel_hrtimer(event);
-       cpu_clock_perf_event_update(event);
+       event->hw.state = PERF_HES_STOPPED;
 }
 
-static void cpu_clock_perf_event_read(struct perf_event *event)
-{
-       cpu_clock_perf_event_update(event);
-}
-
-static const struct pmu perf_ops_cpu_clock = {
-       .enable         = cpu_clock_perf_event_enable,
-       .disable        = cpu_clock_perf_event_disable,
-       .read           = cpu_clock_perf_event_read,
-};
-
-/*
- * Software event: task time clock
- */
-
-static void task_clock_perf_event_update(struct perf_event *event, u64 now)
-{
-       u64 prev;
-       s64 delta;
-
-       prev = local64_xchg(&event->hw.prev_count, now);
-       delta = now - prev;
-       local64_add(delta, &event->count);
-}
-
-static int task_clock_perf_event_enable(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-       u64 now;
-
-       now = event->ctx->time;
-
-       local64_set(&hwc->prev_count, now);
-
-       perf_swevent_start_hrtimer(event);
-
-       return 0;
-}
-
-static void task_clock_perf_event_disable(struct perf_event *event)
-{
-       perf_swevent_cancel_hrtimer(event);
-       task_clock_perf_event_update(event, event->ctx->time);
-
-}
-
-static void task_clock_perf_event_read(struct perf_event *event)
-{
-       u64 time;
-
-       if (!in_nmi()) {
-               update_context_time(event->ctx);
-               time = event->ctx->time;
-       } else {
-               u64 now = perf_clock();
-               u64 delta = now - event->ctx->timestamp;
-               time = event->ctx->time + delta;
-       }
-
-       task_clock_perf_event_update(event, time);
-}
-
-static const struct pmu perf_ops_task_clock = {
-       .enable         = task_clock_perf_event_enable,
-       .disable        = task_clock_perf_event_disable,
-       .read           = task_clock_perf_event_read,
-};
-
 /* Deref the hlist from the update side */
 static inline struct swevent_hlist *
-swevent_hlist_deref(struct perf_cpu_context *cpuctx)
+swevent_hlist_deref(struct swevent_htable *swhash)
 {
-       return rcu_dereference_protected(cpuctx->swevent_hlist,
-                                        lockdep_is_held(&cpuctx->hlist_mutex));
+       return rcu_dereference_protected(swhash->swevent_hlist,
+                                        lockdep_is_held(&swhash->hlist_mutex));
 }
 
 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
@@ -4506,27 +4614,27 @@ static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
        kfree(hlist);
 }
 
-static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
+static void swevent_hlist_release(struct swevent_htable *swhash)
 {
-       struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
+       struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
 
        if (!hlist)
                return;
 
-       rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
+       rcu_assign_pointer(swhash->swevent_hlist, NULL);
        call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
 }
 
 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
 {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
-       mutex_lock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
 
-       if (!--cpuctx->hlist_refcount)
-               swevent_hlist_release(cpuctx);
+       if (!--swhash->hlist_refcount)
+               swevent_hlist_release(swhash);
 
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_unlock(&swhash->hlist_mutex);
 }
 
 static void swevent_hlist_put(struct perf_event *event)
@@ -4544,12 +4652,12 @@ static void swevent_hlist_put(struct perf_event *event)
 
 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
 {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
        int err = 0;
 
-       mutex_lock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
 
-       if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
+       if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
                struct swevent_hlist *hlist;
 
                hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
@@ -4557,11 +4665,11 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
                        err = -ENOMEM;
                        goto exit;
                }
-               rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+               rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
-       cpuctx->hlist_refcount++;
- exit:
-       mutex_unlock(&cpuctx->hlist_mutex);
+       swhash->hlist_refcount++;
+exit:
+       mutex_unlock(&swhash->hlist_mutex);
 
        return err;
 }
@@ -4585,7 +4693,7 @@ static int swevent_hlist_get(struct perf_event *event)
        put_online_cpus();
 
        return 0;
- fail:
+fail:
        for_each_possible_cpu(cpu) {
                if (cpu == failed_cpu)
                        break;
@@ -4596,17 +4704,64 @@ static int swevent_hlist_get(struct perf_event *event)
        return err;
 }
 
-#ifdef CONFIG_EVENT_TRACING
+atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+
+static void sw_perf_event_destroy(struct perf_event *event)
+{
+       u64 event_id = event->attr.config;
 
-static const struct pmu perf_ops_tracepoint = {
-       .enable         = perf_trace_enable,
-       .disable        = perf_trace_disable,
-       .start          = perf_swevent_int,
-       .stop           = perf_swevent_void,
+       WARN_ON(event->parent);
+
+       atomic_dec(&perf_swevent_enabled[event_id]);
+       swevent_hlist_put(event);
+}
+
+static int perf_swevent_init(struct perf_event *event)
+{
+       int event_id = event->attr.config;
+
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+
+       switch (event_id) {
+       case PERF_COUNT_SW_CPU_CLOCK:
+       case PERF_COUNT_SW_TASK_CLOCK:
+               return -ENOENT;
+
+       default:
+               break;
+       }
+
+       if (event_id > PERF_COUNT_SW_MAX)
+               return -ENOENT;
+
+       if (!event->parent) {
+               int err;
+
+               err = swevent_hlist_get(event);
+               if (err)
+                       return err;
+
+               atomic_inc(&perf_swevent_enabled[event_id]);
+               event->destroy = sw_perf_event_destroy;
+       }
+
+       return 0;
+}
+
+static struct pmu perf_swevent = {
+       .task_ctx_nr    = perf_sw_context,
+
+       .event_init     = perf_swevent_init,
+       .add            = perf_swevent_add,
+       .del            = perf_swevent_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void,
 };
 
+#ifdef CONFIG_EVENT_TRACING
+
 static int perf_tp_filter_match(struct perf_event *event,
                                struct perf_sample_data *data)
 {
@@ -4650,7 +4805,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
-                       perf_swevent_add(event, count, 1, &data, regs);
+                       perf_swevent_event(event, count, 1, &data, regs);
        }
 
        perf_swevent_put_recursion_context(rctx);
@@ -4662,10 +4817,13 @@ static void tp_perf_event_destroy(struct perf_event *event)
        perf_trace_destroy(event);
 }
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static int perf_tp_event_init(struct perf_event *event)
 {
        int err;
 
+       if (event->attr.type != PERF_TYPE_TRACEPOINT)
+               return -ENOENT;
+
        /*
         * Raw tracepoint data is a severe data leak, only allow root to
         * have these.
@@ -4673,15 +4831,31 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
        if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
                        perf_paranoid_tracepoint_raw() &&
                        !capable(CAP_SYS_ADMIN))
-               return ERR_PTR(-EPERM);
+               return -EPERM;
 
        err = perf_trace_init(event);
        if (err)
-               return NULL;
+               return err;
 
        event->destroy = tp_perf_event_destroy;
 
-       return &perf_ops_tracepoint;
+       return 0;
+}
+
+static struct pmu perf_tracepoint = {
+       .task_ctx_nr    = perf_sw_context,
+
+       .event_init     = perf_tp_event_init,
+       .add            = perf_trace_add,
+       .del            = perf_trace_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
+       .read           = perf_swevent_read,
+};
+
+static inline void perf_tp_register(void)
+{
+       perf_pmu_register(&perf_tracepoint);
 }
 
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4702,129 +4876,412 @@ static int perf_event_set_filter(struct perf_event *event, void __user *arg)
        return ret;
 }
 
-static void perf_event_free_filter(struct perf_event *event)
+static void perf_event_free_filter(struct perf_event *event)
+{
+       ftrace_profile_free_filter(event);
+}
+
+#else
+
+static inline void perf_tp_register(void)
+{
+}
+
+static int perf_event_set_filter(struct perf_event *event, void __user *arg)
+{
+       return -ENOENT;
+}
+
+static void perf_event_free_filter(struct perf_event *event)
+{
+}
+
+#endif /* CONFIG_EVENT_TRACING */
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+void perf_bp_event(struct perf_event *bp, void *data)
+{
+       struct perf_sample_data sample;
+       struct pt_regs *regs = data;
+
+       perf_sample_data_init(&sample, bp->attr.bp_addr);
+
+       if (!bp->hw.state && !perf_exclude_event(bp, regs))
+               perf_swevent_event(bp, 1, 1, &sample, regs);
+}
+#endif
+
+/*
+ * hrtimer based swevent callback
+ */
+
+static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+{
+       enum hrtimer_restart ret = HRTIMER_RESTART;
+       struct perf_sample_data data;
+       struct pt_regs *regs;
+       struct perf_event *event;
+       u64 period;
+
+       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+       event->pmu->read(event);
+
+       perf_sample_data_init(&data, 0);
+       data.period = event->hw.last_period;
+       regs = get_irq_regs();
+
+       if (regs && !perf_exclude_event(event, regs)) {
+               if (!(event->attr.exclude_idle && current->pid == 0))
+                       if (perf_event_overflow(event, 0, &data, regs))
+                               ret = HRTIMER_NORESTART;
+       }
+
+       period = max_t(u64, 10000, event->hw.sample_period);
+       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+       return ret;
+}
+
+static void perf_swevent_start_hrtimer(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hwc->hrtimer.function = perf_swevent_hrtimer;
+       if (hwc->sample_period) {
+               s64 period = local64_read(&hwc->period_left);
+
+               if (period) {
+                       if (period < 0)
+                               period = 10000;
+
+                       local64_set(&hwc->period_left, 0);
+               } else {
+                       period = max_t(u64, 10000, hwc->sample_period);
+               }
+               __hrtimer_start_range_ns(&hwc->hrtimer,
+                               ns_to_ktime(period), 0,
+                               HRTIMER_MODE_REL_PINNED, 0);
+       }
+}
+
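perf_swevent_start_hrtimer() above picks the first timer period from a saved period_left when there is one (clamped to at least 10us), and otherwise from the configured sample period, also clamped. A small sketch of just that selection, in nanoseconds:

#include <stdio.h>
#include <stdint.h>

static int64_t pick_period(int64_t period_left, uint64_t sample_period)
{
	int64_t period;

	if (period_left) {
		/* reuse the leftover, but never arm shorter than 10us */
		period = period_left < 0 ? 10000 : period_left;
	} else {
		period = sample_period > 10000 ? (int64_t)sample_period : 10000;
	}
	return period;
}

int main(void)
{
	printf("%lld\n", (long long)pick_period(0, 5000));	/* -> 10000 */
	printf("%lld\n", (long long)pick_period(250000, 0));	/* -> 250000 */
	return 0;
}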
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (hwc->sample_period) {
+               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+               local64_set(&hwc->period_left, ktime_to_ns(remaining));
+
+               hrtimer_cancel(&hwc->hrtimer);
+       }
+}
+
+/*
+ * Software event: cpu wall time clock
+ */
+
+static void cpu_clock_event_update(struct perf_event *event)
+{
+       s64 prev;
+       u64 now;
+
+       now = local_clock();
+       prev = local64_xchg(&event->hw.prev_count, now);
+       local64_add(now - prev, &event->count);
+}
+
+static void cpu_clock_event_start(struct perf_event *event, int flags)
+{
+       local64_set(&event->hw.prev_count, local_clock());
+       perf_swevent_start_hrtimer(event);
+}
+
+static void cpu_clock_event_stop(struct perf_event *event, int flags)
+{
+       perf_swevent_cancel_hrtimer(event);
+       cpu_clock_event_update(event);
+}
+
+static int cpu_clock_event_add(struct perf_event *event, int flags)
+{
+       if (flags & PERF_EF_START)
+               cpu_clock_event_start(event, flags);
+
+       return 0;
+}
+
+static void cpu_clock_event_del(struct perf_event *event, int flags)
+{
+       cpu_clock_event_stop(event, flags);
+}
+
+static void cpu_clock_event_read(struct perf_event *event)
+{
+       cpu_clock_event_update(event);
+}
+
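cpu_clock_event_update() above accumulates elapsed time by exchanging the stored previous timestamp for the current one and adding the delta to event->count. A rough single-threaded user-space analogue using clock_gettime() in place of local_clock(); the lock-free local64_xchg() is reduced to a plain assignment here:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t prev = now_ns(), count = 0;

	for (int i = 0; i < 3; i++) {
		uint64_t now = now_ns();

		count += now - prev;	/* event->count accumulates elapsed time */
		prev = now;		/* prev_count is swapped for "now" */
	}
	printf("accumulated %llu ns\n", (unsigned long long)count);
	return 0;
}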
+static int cpu_clock_event_init(struct perf_event *event)
+{
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+
+       if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
+               return -ENOENT;
+
+       return 0;
+}
+
+static struct pmu perf_cpu_clock = {
+       .task_ctx_nr    = perf_sw_context,
+
+       .event_init     = cpu_clock_event_init,
+       .add            = cpu_clock_event_add,
+       .del            = cpu_clock_event_del,
+       .start          = cpu_clock_event_start,
+       .stop           = cpu_clock_event_stop,
+       .read           = cpu_clock_event_read,
+};
+
+/*
+ * Software event: task time clock
+ */
+
+static void task_clock_event_update(struct perf_event *event, u64 now)
+{
+       u64 prev;
+       s64 delta;
+
+       prev = local64_xchg(&event->hw.prev_count, now);
+       delta = now - prev;
+       local64_add(delta, &event->count);
+}
+
+static void task_clock_event_start(struct perf_event *event, int flags)
+{
+       local64_set(&event->hw.prev_count, event->ctx->time);
+       perf_swevent_start_hrtimer(event);
+}
+
+static void task_clock_event_stop(struct perf_event *event, int flags)
+{
+       perf_swevent_cancel_hrtimer(event);
+       task_clock_event_update(event, event->ctx->time);
+}
+
+static int task_clock_event_add(struct perf_event *event, int flags)
+{
+       if (flags & PERF_EF_START)
+               task_clock_event_start(event, flags);
+
+       return 0;
+}
+
+static void task_clock_event_del(struct perf_event *event, int flags)
+{
+       task_clock_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void task_clock_event_read(struct perf_event *event)
+{
+       u64 time;
+
+       if (!in_nmi()) {
+               update_context_time(event->ctx);
+               time = event->ctx->time;
+       } else {
+               u64 now = perf_clock();
+               u64 delta = now - event->ctx->timestamp;
+               time = event->ctx->time + delta;
+       }
+
+       task_clock_event_update(event, time);
+}
+
+static int task_clock_event_init(struct perf_event *event)
 {
-       ftrace_profile_free_filter(event);
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+
+       if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
+               return -ENOENT;
+
+       return 0;
 }
 
-#else
+static struct pmu perf_task_clock = {
+       .task_ctx_nr    = perf_sw_context,
+
+       .event_init     = task_clock_event_init,
+       .add            = task_clock_event_add,
+       .del            = task_clock_event_del,
+       .start          = task_clock_event_start,
+       .stop           = task_clock_event_stop,
+       .read           = task_clock_event_read,
+};
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static void perf_pmu_nop_void(struct pmu *pmu)
 {
-       return NULL;
 }
 
-static int perf_event_set_filter(struct perf_event *event, void __user *arg)
+static int perf_pmu_nop_int(struct pmu *pmu)
 {
-       return -ENOENT;
+       return 0;
 }
 
-static void perf_event_free_filter(struct perf_event *event)
+static void perf_pmu_start_txn(struct pmu *pmu)
 {
+       perf_pmu_disable(pmu);
 }
 
-#endif /* CONFIG_EVENT_TRACING */
+static int perf_pmu_commit_txn(struct pmu *pmu)
+{
+       perf_pmu_enable(pmu);
+       return 0;
+}
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-static void bp_perf_event_destroy(struct perf_event *event)
+static void perf_pmu_cancel_txn(struct pmu *pmu)
 {
-       release_bp_slot(event);
+       perf_pmu_enable(pmu);
 }
 
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+/*
+ * Ensures all contexts with the same task_ctx_nr have the same
+ * pmu_cpu_context too.
+ */
+static void *find_pmu_context(int ctxn)
 {
-       int err;
+       struct pmu *pmu;
 
-       err = register_perf_hw_breakpoint(bp);
-       if (err)
-               return ERR_PTR(err);
+       if (ctxn < 0)
+               return NULL;
 
-       bp->destroy = bp_perf_event_destroy;
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (pmu->task_ctx_nr == ctxn)
+                       return pmu->pmu_cpu_context;
+       }
 
-       return &perf_ops_bp;
+       return NULL;
 }
 
-void perf_bp_event(struct perf_event *bp, void *data)
+static void free_pmu_context(void * __percpu cpu_context)
 {
-       struct perf_sample_data sample;
-       struct pt_regs *regs = data;
+       struct pmu *pmu;
 
-       perf_sample_data_init(&sample, bp->attr.bp_addr);
+       mutex_lock(&pmus_lock);
+       /*
+        * Like a real lame refcount.
+        */
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (pmu->pmu_cpu_context == cpu_context)
+                       goto out;
+       }
 
-       if (!perf_exclude_event(bp, regs))
-               perf_swevent_add(bp, 1, 1, &sample, regs);
-}
-#else
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
-{
-       return NULL;
+       free_percpu(cpu_context);
+out:
+       mutex_unlock(&pmus_lock);
 }
 
-void perf_bp_event(struct perf_event *bp, void *regs)
+int perf_pmu_register(struct pmu *pmu)
 {
-}
-#endif
+       int cpu, ret;
 
-atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+       mutex_lock(&pmus_lock);
+       ret = -ENOMEM;
+       pmu->pmu_disable_count = alloc_percpu(int);
+       if (!pmu->pmu_disable_count)
+               goto unlock;
 
-static void sw_perf_event_destroy(struct perf_event *event)
-{
-       u64 event_id = event->attr.config;
+       pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
+       if (pmu->pmu_cpu_context)
+               goto got_cpu_context;
 
-       WARN_ON(event->parent);
+       pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
+       if (!pmu->pmu_cpu_context)
+               goto free_pdc;
 
-       atomic_dec(&perf_swevent_enabled[event_id]);
-       swevent_hlist_put(event);
+       for_each_possible_cpu(cpu) {
+               struct perf_cpu_context *cpuctx;
+
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+               __perf_event_init_context(&cpuctx->ctx);
+               cpuctx->ctx.type = cpu_context;
+               cpuctx->ctx.pmu = pmu;
+               cpuctx->jiffies_interval = 1;
+               INIT_LIST_HEAD(&cpuctx->rotation_list);
+       }
+
+got_cpu_context:
+       if (!pmu->start_txn) {
+               if (pmu->pmu_enable) {
+                       /*
+                        * If we have pmu_enable/pmu_disable calls, install
+                        * transaction stubs that use that to try and batch
+                        * hardware accesses.
+                        */
+                       pmu->start_txn  = perf_pmu_start_txn;
+                       pmu->commit_txn = perf_pmu_commit_txn;
+                       pmu->cancel_txn = perf_pmu_cancel_txn;
+               } else {
+                       pmu->start_txn  = perf_pmu_nop_void;
+                       pmu->commit_txn = perf_pmu_nop_int;
+                       pmu->cancel_txn = perf_pmu_nop_void;
+               }
+       }
+
+       if (!pmu->pmu_enable) {
+               pmu->pmu_enable  = perf_pmu_nop_void;
+               pmu->pmu_disable = perf_pmu_nop_void;
+       }
+
+       list_add_rcu(&pmu->entry, &pmus);
+       ret = 0;
+unlock:
+       mutex_unlock(&pmus_lock);
+
+       return ret;
+
+free_pdc:
+       free_percpu(pmu->pmu_disable_count);
+       goto unlock;
 }
 
-static const struct pmu *sw_perf_event_init(struct perf_event *event)
+void perf_pmu_unregister(struct pmu *pmu)
 {
-       const struct pmu *pmu = NULL;
-       u64 event_id = event->attr.config;
+       mutex_lock(&pmus_lock);
+       list_del_rcu(&pmu->entry);
+       mutex_unlock(&pmus_lock);
 
        /*
-        * Software events (currently) can't in general distinguish
-        * between user, kernel and hypervisor events.
-        * However, context switches and cpu migrations are considered
-        * to be kernel events, and page faults are never hypervisor
-        * events.
+        * We dereference the pmu list under both SRCU and regular RCU, so
+        * synchronize against both of those.
         */
-       switch (event_id) {
-       case PERF_COUNT_SW_CPU_CLOCK:
-               pmu = &perf_ops_cpu_clock;
+       synchronize_srcu(&pmus_srcu);
+       synchronize_rcu();
 
-               break;
-       case PERF_COUNT_SW_TASK_CLOCK:
-               /*
-                * If the user instantiates this as a per-cpu event,
-                * use the cpu_clock event instead.
-                */
-               if (event->ctx->task)
-                       pmu = &perf_ops_task_clock;
-               else
-                       pmu = &perf_ops_cpu_clock;
+       free_percpu(pmu->pmu_disable_count);
+       free_pmu_context(pmu->pmu_cpu_context);
+}
 
-               break;
-       case PERF_COUNT_SW_PAGE_FAULTS:
-       case PERF_COUNT_SW_PAGE_FAULTS_MIN:
-       case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
-       case PERF_COUNT_SW_CONTEXT_SWITCHES:
-       case PERF_COUNT_SW_CPU_MIGRATIONS:
-       case PERF_COUNT_SW_ALIGNMENT_FAULTS:
-       case PERF_COUNT_SW_EMULATION_FAULTS:
-               if (!event->parent) {
-                       int err;
-
-                       err = swevent_hlist_get(event);
-                       if (err)
-                               return ERR_PTR(err);
+struct pmu *perf_init_event(struct perf_event *event)
+{
+       struct pmu *pmu = NULL;
+       int idx;
+
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               int ret = pmu->event_init(event);
+               if (!ret)
+                       goto unlock;
 
-                       atomic_inc(&perf_swevent_enabled[event_id]);
-                       event->destroy = sw_perf_event_destroy;
+               if (ret != -ENOENT) {
+                       pmu = ERR_PTR(ret);
+                       goto unlock;
                }
-               pmu = &perf_ops_generic;
-               break;
        }
+       pmu = ERR_PTR(-ENOENT);
+unlock:
+       srcu_read_unlock(&pmus_srcu, idx);
 
        return pmu;
 }
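
The loop above defines the dispatch contract of the new interface: perf_init_event() walks the registered PMUs and hands the event to the first one whose ->event_init() accepts it. Returning -ENOENT means "not mine, try the next PMU"; any other error aborts event creation. A minimal sketch of an event_init following that contract; the mydrv_* names and MYDRV_MAX_CONFIG are hypothetical and not part of this patch:

    /* Hypothetical PMU driver callback following the ->event_init() contract. */
    static int mydrv_pmu_event_init(struct perf_event *event)
    {
            if (event->attr.type != PERF_TYPE_RAW)
                    return -ENOENT;   /* not ours: perf_init_event() tries the next pmu */

            if (event->attr.config > MYDRV_MAX_CONFIG)
                    return -EINVAL;   /* ours, but the requested config is invalid */

            return 0;                 /* claimed: the event is bound to this pmu */
    }
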
@@ -4833,20 +5290,17 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event)
  * Allocate and initialize a event structure
  */
 static struct perf_event *
-perf_event_alloc(struct perf_event_attr *attr,
-                  int cpu,
-                  struct perf_event_context *ctx,
+perf_event_alloc(struct perf_event_attr *attr, int cpu,
                   struct perf_event *group_leader,
                   struct perf_event *parent_event,
-                  perf_overflow_handler_t overflow_handler,
-                  gfp_t gfpflags)
+                  perf_overflow_handler_t overflow_handler)
 {
-       const struct pmu *pmu;
+       struct pmu *pmu;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        long err;
 
-       event = kzalloc(sizeof(*event), gfpflags);
+       event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return ERR_PTR(-ENOMEM);
 
@@ -4871,7 +5325,6 @@ perf_event_alloc(struct perf_event_attr *attr,
        event->attr             = *attr;
        event->group_leader     = group_leader;
        event->pmu              = NULL;
-       event->ctx              = ctx;
        event->oncpu            = -1;
 
        event->parent           = parent_event;
@@ -4905,29 +5358,8 @@ perf_event_alloc(struct perf_event_attr *attr,
        if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
                goto done;
 
-       switch (attr->type) {
-       case PERF_TYPE_RAW:
-       case PERF_TYPE_HARDWARE:
-       case PERF_TYPE_HW_CACHE:
-               pmu = hw_perf_event_init(event);
-               break;
-
-       case PERF_TYPE_SOFTWARE:
-               pmu = sw_perf_event_init(event);
-               break;
-
-       case PERF_TYPE_TRACEPOINT:
-               pmu = tp_perf_event_init(event);
-               break;
-
-       case PERF_TYPE_BREAKPOINT:
-               pmu = bp_perf_event_init(event);
-               break;
-
+       pmu = perf_init_event(event);
 
-       default:
-               break;
-       }
 done:
        err = 0;
        if (!pmu)
@@ -4952,6 +5384,13 @@ done:
                        atomic_inc(&nr_comm_events);
                if (event->attr.task)
                        atomic_inc(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+                       err = get_callchain_buffers();
+                       if (err) {
+                               free_event(event);
+                               return ERR_PTR(err);
+                       }
+               }
        }
 
        return event;
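
The hunk above ties the shared callchain buffers to actual demand: they are allocated only when an event's sample_type requests PERF_SAMPLE_CALLCHAIN, and the event is torn down with free_event() if that allocation fails. A hedged user-side illustration, with invented values, of an attribute that would exercise this path:

    /* Illustrative perf_event_attr; the PERF_SAMPLE_CALLCHAIN bit is what
     * triggers get_callchain_buffers() in perf_event_alloc() above. */
    struct perf_event_attr attr = {
            .type          = PERF_TYPE_SOFTWARE,
            .config        = PERF_COUNT_SW_CPU_CLOCK,
            .sample_period = 100000,
            .sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN,
    };
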
@@ -5099,12 +5538,16 @@ SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
-       struct perf_event *event, *group_leader = NULL, *output_event = NULL;
+       struct perf_event *group_leader = NULL, *output_event = NULL;
+       struct perf_event *event, *sibling;
        struct perf_event_attr attr;
        struct perf_event_context *ctx;
        struct file *event_file = NULL;
        struct file *group_file = NULL;
+       struct task_struct *task = NULL;
+       struct pmu *pmu;
        int event_fd;
+       int move_group = 0;
        int fput_needed = 0;
        int err;
 
@@ -5130,20 +5573,11 @@ SYSCALL_DEFINE5(perf_event_open,
        if (event_fd < 0)
                return event_fd;
 
-       /*
-        * Get the target context (task or percpu):
-        */
-       ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err_fd;
-       }
-
        if (group_fd != -1) {
                group_leader = perf_fget_light(group_fd, &fput_needed);
                if (IS_ERR(group_leader)) {
                        err = PTR_ERR(group_leader);
-                       goto err_put_context;
+                       goto err_fd;
                }
                group_file = group_leader->filp;
                if (flags & PERF_FLAG_FD_OUTPUT)
@@ -5152,6 +5586,58 @@ SYSCALL_DEFINE5(perf_event_open,
                        group_leader = NULL;
        }
 
+       event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL);
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
+               goto err_fd;
+       }
+
+       /*
+        * Special case software events and allow them to be part of
+        * any hardware group.
+        */
+       pmu = event->pmu;
+
+       if (group_leader &&
+           (is_software_event(event) != is_software_event(group_leader))) {
+               if (is_software_event(event)) {
+                       /*
+                        * The event and group_leader differ here and the event
+                        * is the software one, so the group leader must be a
+                        * hardware event.
+                        *
+                        * Allow adding software events to !software groups;
+                        * this is safe because software events never fail to
+                        * schedule.
+                        */
+                       pmu = group_leader->pmu;
+               } else if (is_software_event(group_leader) &&
+                          (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+                       /*
+                        * In case the group is a pure software group, and we
+                        * try to add a hardware event, move the whole group to
+                        * the hardware context.
+                        */
+                       move_group = 1;
+               }
+       }
+
+       if (pid != -1) {
+               task = find_lively_task_by_vpid(pid);
+               if (IS_ERR(task)) {
+                       err = PTR_ERR(task);
+                       goto err_group_fd;
+               }
+       }
+
+       /*
+        * Get the target context (task or percpu):
+        */
+       ctx = find_get_context(pmu, task, cpu);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_group_fd;
+       }
+
        /*
         * Look up the group leader (we will attach this event to it):
         */
@@ -5163,42 +5649,66 @@ SYSCALL_DEFINE5(perf_event_open,
                 * becoming part of another group-sibling):
                 */
                if (group_leader->group_leader != group_leader)
-                       goto err_put_context;
+                       goto err_context;
                /*
                 * Do not allow to attach to a group in a different
                 * task or CPU context:
                 */
-               if (group_leader->ctx != ctx)
-                       goto err_put_context;
+               if (move_group) {
+                       if (group_leader->ctx->type != ctx->type)
+                               goto err_context;
+               } else {
+                       if (group_leader->ctx != ctx)
+                               goto err_context;
+               }
+
                /*
                 * Only a group leader can be exclusive or pinned
                 */
                if (attr.exclusive || attr.pinned)
-                       goto err_put_context;
-       }
-
-       event = perf_event_alloc(&attr, cpu, ctx, group_leader,
-                                    NULL, NULL, GFP_KERNEL);
-       if (IS_ERR(event)) {
-               err = PTR_ERR(event);
-               goto err_put_context;
+                       goto err_context;
        }
 
        if (output_event) {
                err = perf_event_set_output(event, output_event);
                if (err)
-                       goto err_free_put_context;
+                       goto err_context;
        }
 
        event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
        if (IS_ERR(event_file)) {
                err = PTR_ERR(event_file);
-               goto err_free_put_context;
+               goto err_context;
+       }
+
+       if (move_group) {
+               struct perf_event_context *gctx = group_leader->ctx;
+
+               mutex_lock(&gctx->mutex);
+               perf_event_remove_from_context(group_leader);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_event_remove_from_context(sibling);
+                       put_ctx(gctx);
+               }
+               mutex_unlock(&gctx->mutex);
+               put_ctx(gctx);
        }
 
        event->filp = event_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
+
+       if (move_group) {
+               perf_install_in_context(ctx, group_leader, cpu);
+               get_ctx(ctx);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_install_in_context(ctx, sibling, cpu);
+                       get_ctx(ctx);
+               }
+       }
+
        perf_install_in_context(ctx, event, cpu);
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);
@@ -5219,11 +5729,11 @@ SYSCALL_DEFINE5(perf_event_open,
        fd_install(event_fd, event_file);
        return event_fd;
 
-err_free_put_context:
-       free_event(event);
-err_put_context:
-       fput_light(group_file, fput_needed);
+err_context:
        put_ctx(ctx);
+err_group_fd:
+       fput_light(group_file, fput_needed);
+       free_event(event);
 err_fd:
        put_unused_fd(event_fd);
        return err;
@@ -5234,154 +5744,54 @@ err_fd:
  *
  * @attr: attributes of the counter to create
  * @cpu: cpu in which the counter is bound
- * @pid: task to profile
+ * @task: task to profile (NULL for percpu)
  */
 struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
-                                pid_t pid,
+                                struct task_struct *task,
                                 perf_overflow_handler_t overflow_handler)
 {
-       struct perf_event *event;
        struct perf_event_context *ctx;
-       int err;
-
-       /*
-        * Get the target context (task or percpu):
-        */
-
-       ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err_exit;
-       }
-
-       event = perf_event_alloc(attr, cpu, ctx, NULL,
-                                NULL, overflow_handler, GFP_KERNEL);
-       if (IS_ERR(event)) {
-               err = PTR_ERR(event);
-               goto err_put_context;
-       }
-
-       event->filp = NULL;
-       WARN_ON_ONCE(ctx->parent_ctx);
-       mutex_lock(&ctx->mutex);
-       perf_install_in_context(ctx, event, cpu);
-       ++ctx->generation;
-       mutex_unlock(&ctx->mutex);
-
-       event->owner = current;
-       get_task_struct(current);
-       mutex_lock(&current->perf_event_mutex);
-       list_add_tail(&event->owner_entry, &current->perf_event_list);
-       mutex_unlock(&current->perf_event_mutex);
-
-       return event;
-
- err_put_context:
-       put_ctx(ctx);
- err_exit:
-       return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
-
-/*
- * inherit a event from parent task to child task:
- */
-static struct perf_event *
-inherit_event(struct perf_event *parent_event,
-             struct task_struct *parent,
-             struct perf_event_context *parent_ctx,
-             struct task_struct *child,
-             struct perf_event *group_leader,
-             struct perf_event_context *child_ctx)
-{
-       struct perf_event *child_event;
-
-       /*
-        * Instead of creating recursive hierarchies of events,
-        * we link inherited events back to the original parent,
-        * which has a filp for sure, which we use as the reference
-        * count:
-        */
-       if (parent_event->parent)
-               parent_event = parent_event->parent;
-
-       child_event = perf_event_alloc(&parent_event->attr,
-                                          parent_event->cpu, child_ctx,
-                                          group_leader, parent_event,
-                                          NULL, GFP_KERNEL);
-       if (IS_ERR(child_event))
-               return child_event;
-       get_ctx(child_ctx);
-
-       /*
-        * Make the child state follow the state of the parent event,
-        * not its attr.disabled bit.  We hold the parent's mutex,
-        * so we won't race with perf_event_{en, dis}able_family.
-        */
-       if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
-               child_event->state = PERF_EVENT_STATE_INACTIVE;
-       else
-               child_event->state = PERF_EVENT_STATE_OFF;
-
-       if (parent_event->attr.freq) {
-               u64 sample_period = parent_event->hw.sample_period;
-               struct hw_perf_event *hwc = &child_event->hw;
-
-               hwc->sample_period = sample_period;
-               hwc->last_period   = sample_period;
-
-               local64_set(&hwc->period_left, sample_period);
-       }
-
-       child_event->overflow_handler = parent_event->overflow_handler;
-
-       /*
-        * Link it up in the child's context:
-        */
-       add_event_to_ctx(child_event, child_ctx);
-
-       /*
-        * Get a reference to the parent filp - we will fput it
-        * when the child event exits. This is safe to do because
-        * we are in the parent and we know that the filp still
-        * exists and has a nonzero count:
-        */
-       atomic_long_inc(&parent_event->filp->f_count);
-
-       /*
-        * Link this into the parent event's child list
-        */
-       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-       mutex_lock(&parent_event->child_mutex);
-       list_add_tail(&child_event->child_list, &parent_event->child_list);
-       mutex_unlock(&parent_event->child_mutex);
+       struct perf_event *event;
+       int err;
 
-       return child_event;
-}
+       /*
+        * Get the target context (task or percpu):
+        */
 
-static int inherit_group(struct perf_event *parent_event,
-             struct task_struct *parent,
-             struct perf_event_context *parent_ctx,
-             struct task_struct *child,
-             struct perf_event_context *child_ctx)
-{
-       struct perf_event *leader;
-       struct perf_event *sub;
-       struct perf_event *child_ctr;
+       event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler);
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
+               goto err;
+       }
 
-       leader = inherit_event(parent_event, parent, parent_ctx,
-                                child, NULL, child_ctx);
-       if (IS_ERR(leader))
-               return PTR_ERR(leader);
-       list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
-               child_ctr = inherit_event(sub, parent, parent_ctx,
-                                           child, leader, child_ctx);
-               if (IS_ERR(child_ctr))
-                       return PTR_ERR(child_ctr);
+       ctx = find_get_context(event->pmu, task, cpu);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_free;
        }
-       return 0;
+
+       event->filp = NULL;
+       WARN_ON_ONCE(ctx->parent_ctx);
+       mutex_lock(&ctx->mutex);
+       perf_install_in_context(ctx, event, cpu);
+       ++ctx->generation;
+       mutex_unlock(&ctx->mutex);
+
+       event->owner = current;
+       get_task_struct(current);
+       mutex_lock(&current->perf_event_mutex);
+       list_add_tail(&event->owner_entry, &current->perf_event_list);
+       mutex_unlock(&current->perf_event_mutex);
+
+       return event;
+
+err_free:
+       free_event(event);
+err:
+       return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
 static void sync_child_event(struct perf_event *child_event,
                               struct task_struct *child)
@@ -5439,16 +5849,13 @@ __perf_event_exit_task(struct perf_event *child_event,
        }
 }
 
-/*
- * When a child task exits, feed back event values to parent events.
- */
-void perf_event_exit_task(struct task_struct *child)
+static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
        struct perf_event *child_event, *tmp;
        struct perf_event_context *child_ctx;
        unsigned long flags;
 
-       if (likely(!child->perf_event_ctxp)) {
+       if (likely(!child->perf_event_ctxp[ctxn])) {
                perf_event_task(child, NULL, 0);
                return;
        }
@@ -5460,7 +5867,7 @@ void perf_event_exit_task(struct task_struct *child)
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
-       child_ctx = child->perf_event_ctxp;
+       child_ctx = child->perf_event_ctxp[ctxn];
        __perf_event_task_sched_out(child_ctx);
 
        /*
@@ -5469,7 +5876,7 @@ void perf_event_exit_task(struct task_struct *child)
         * incremented the context's refcount before we do put_ctx below.
         */
        raw_spin_lock(&child_ctx->lock);
-       child->perf_event_ctxp = NULL;
+       child->perf_event_ctxp[ctxn] = NULL;
        /*
         * If this context is a clone; unclone it so it can't get
         * swapped to another process while we're removing all
@@ -5522,6 +5929,17 @@ again:
        put_ctx(child_ctx);
 }
 
+/*
+ * When a child task exits, feed back event values to parent events.
+ */
+void perf_event_exit_task(struct task_struct *child)
+{
+       int ctxn;
+
+       for_each_task_context_nr(ctxn)
+               perf_event_exit_task_context(child, ctxn);
+}
+
 static void perf_free_event(struct perf_event *event,
                            struct perf_event_context *ctx)
 {
@@ -5543,48 +5961,165 @@ static void perf_free_event(struct perf_event *event,
 
 /*
  * free an unexposed, unused context as created by inheritance by
- * init_task below, used by fork() in case of fail.
+ * perf_event_init_task below, used by fork() in case of fail.
  */
 void perf_event_free_task(struct task_struct *task)
 {
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_event_context *ctx;
        struct perf_event *event, *tmp;
+       int ctxn;
 
-       if (!ctx)
-               return;
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (!ctx)
+                       continue;
 
-       mutex_lock(&ctx->mutex);
+               mutex_lock(&ctx->mutex);
 again:
-       list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-               perf_free_event(event, ctx);
+               list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
+                               group_entry)
+                       perf_free_event(event, ctx);
 
-       list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-                                group_entry)
-               perf_free_event(event, ctx);
+               list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
+                               group_entry)
+                       perf_free_event(event, ctx);
 
-       if (!list_empty(&ctx->pinned_groups) ||
-           !list_empty(&ctx->flexible_groups))
-               goto again;
+               if (!list_empty(&ctx->pinned_groups) ||
+                               !list_empty(&ctx->flexible_groups))
+                       goto again;
 
-       mutex_unlock(&ctx->mutex);
+               mutex_unlock(&ctx->mutex);
 
-       put_ctx(ctx);
+               put_ctx(ctx);
+       }
+}
+
+void perf_event_delayed_put(struct task_struct *task)
+{
+       int ctxn;
+
+       for_each_task_context_nr(ctxn)
+               WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
+}
+
+/*
+ * inherit an event from parent task to child task:
+ */
+static struct perf_event *
+inherit_event(struct perf_event *parent_event,
+             struct task_struct *parent,
+             struct perf_event_context *parent_ctx,
+             struct task_struct *child,
+             struct perf_event *group_leader,
+             struct perf_event_context *child_ctx)
+{
+       struct perf_event *child_event;
+       unsigned long flags;
+
+       /*
+        * Instead of creating recursive hierarchies of events,
+        * we link inherited events back to the original parent,
+        * which has a filp for sure, which we use as the reference
+        * count:
+        */
+       if (parent_event->parent)
+               parent_event = parent_event->parent;
+
+       child_event = perf_event_alloc(&parent_event->attr,
+                                          parent_event->cpu,
+                                          group_leader, parent_event,
+                                          NULL);
+       if (IS_ERR(child_event))
+               return child_event;
+       get_ctx(child_ctx);
+
+       /*
+        * Make the child state follow the state of the parent event,
+        * not its attr.disabled bit.  We hold the parent's mutex,
+        * so we won't race with perf_event_{en, dis}able_family.
+        */
+       if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+               child_event->state = PERF_EVENT_STATE_INACTIVE;
+       else
+               child_event->state = PERF_EVENT_STATE_OFF;
+
+       if (parent_event->attr.freq) {
+               u64 sample_period = parent_event->hw.sample_period;
+               struct hw_perf_event *hwc = &child_event->hw;
+
+               hwc->sample_period = sample_period;
+               hwc->last_period   = sample_period;
+
+               local64_set(&hwc->period_left, sample_period);
+       }
+
+       child_event->ctx = child_ctx;
+       child_event->overflow_handler = parent_event->overflow_handler;
+
+       /*
+        * Link it up in the child's context:
+        */
+       raw_spin_lock_irqsave(&child_ctx->lock, flags);
+       add_event_to_ctx(child_event, child_ctx);
+       raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+       /*
+        * Get a reference to the parent filp - we will fput it
+        * when the child event exits. This is safe to do because
+        * we are in the parent and we know that the filp still
+        * exists and has a nonzero count:
+        */
+       atomic_long_inc(&parent_event->filp->f_count);
+
+       /*
+        * Link this into the parent event's child list
+        */
+       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+       mutex_lock(&parent_event->child_mutex);
+       list_add_tail(&child_event->child_list, &parent_event->child_list);
+       mutex_unlock(&parent_event->child_mutex);
+
+       return child_event;
+}
+
+static int inherit_group(struct perf_event *parent_event,
+             struct task_struct *parent,
+             struct perf_event_context *parent_ctx,
+             struct task_struct *child,
+             struct perf_event_context *child_ctx)
+{
+       struct perf_event *leader;
+       struct perf_event *sub;
+       struct perf_event *child_ctr;
+
+       leader = inherit_event(parent_event, parent, parent_ctx,
+                                child, NULL, child_ctx);
+       if (IS_ERR(leader))
+               return PTR_ERR(leader);
+       list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+               child_ctr = inherit_event(sub, parent, parent_ctx,
+                                           child, leader, child_ctx);
+               if (IS_ERR(child_ctr))
+                       return PTR_ERR(child_ctr);
+       }
+       return 0;
 }
 
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
                   struct perf_event_context *parent_ctx,
-                  struct task_struct *child,
+                  struct task_struct *child, int ctxn,
                   int *inherited_all)
 {
        int ret;
-       struct perf_event_context *child_ctx = child->perf_event_ctxp;
+       struct perf_event_context *child_ctx;
 
        if (!event->attr.inherit) {
                *inherited_all = 0;
                return 0;
        }
 
+       child_ctx = child->perf_event_ctxp[ctxn];
        if (!child_ctx) {
                /*
                 * This is executed from the parent task context, so
@@ -5593,14 +6128,11 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
                 * child.
                 */
 
-               child_ctx = kzalloc(sizeof(struct perf_event_context),
-                                   GFP_KERNEL);
+               child_ctx = alloc_perf_context(event->pmu, child);
                if (!child_ctx)
                        return -ENOMEM;
 
-               __perf_event_init_context(child_ctx, child);
-               child->perf_event_ctxp = child_ctx;
-               get_task_struct(child);
+               child->perf_event_ctxp[ctxn] = child_ctx;
        }
 
        ret = inherit_group(event, parent, parent_ctx,
@@ -5612,11 +6144,10 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
        return ret;
 }
 
-
 /*
  * Initialize the perf_event context in task_struct
  */
-int perf_event_init_task(struct task_struct *child)
+int perf_event_init_context(struct task_struct *child, int ctxn)
 {
        struct perf_event_context *child_ctx, *parent_ctx;
        struct perf_event_context *cloned_ctx;
@@ -5625,19 +6156,19 @@ int perf_event_init_task(struct task_struct *child)
        int inherited_all = 1;
        int ret = 0;
 
-       child->perf_event_ctxp = NULL;
+       child->perf_event_ctxp[ctxn] = NULL;
 
        mutex_init(&child->perf_event_mutex);
        INIT_LIST_HEAD(&child->perf_event_list);
 
-       if (likely(!parent->perf_event_ctxp))
+       if (likely(!parent->perf_event_ctxp[ctxn]))
                return 0;
 
        /*
         * If the parent's context is a clone, pin it so it won't get
         * swapped under us.
         */
-       parent_ctx = perf_pin_task_context(parent);
+       parent_ctx = perf_pin_task_context(parent, ctxn);
 
        /*
         * No need to check if parent_ctx != NULL here; since we saw
@@ -5657,20 +6188,20 @@ int perf_event_init_task(struct task_struct *child)
         * the list, not manipulating it:
         */
        list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
-               ret = inherit_task_group(event, parent, parent_ctx, child,
-                                        &inherited_all);
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
                if (ret)
                        break;
        }
 
        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
-               ret = inherit_task_group(event, parent, parent_ctx, child,
-                                        &inherited_all);
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
                if (ret)
                        break;
        }
 
-       child_ctx = child->perf_event_ctxp;
+       child_ctx = child->perf_event_ctxp[ctxn];
 
        if (child_ctx && inherited_all) {
                /*
@@ -5699,63 +6230,98 @@ int perf_event_init_task(struct task_struct *child)
        return ret;
 }
 
+/*
+ * Initialize the perf_event context in task_struct
+ */
+int perf_event_init_task(struct task_struct *child)
+{
+       int ctxn, ret;
+
+       for_each_task_context_nr(ctxn) {
+               ret = perf_event_init_context(child, ctxn);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static void __init perf_event_init_all_cpus(void)
 {
+       struct swevent_htable *swhash;
        int cpu;
-       struct perf_cpu_context *cpuctx;
 
        for_each_possible_cpu(cpu) {
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               mutex_init(&cpuctx->hlist_mutex);
-               __perf_event_init_context(&cpuctx->ctx, NULL);
+               swhash = &per_cpu(swevent_htable, cpu);
+               mutex_init(&swhash->hlist_mutex);
+               INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
        }
 }
 
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
-       struct perf_cpu_context *cpuctx;
-
-       cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
-       spin_lock(&perf_resource_lock);
-       cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
-       spin_unlock(&perf_resource_lock);
-
-       mutex_lock(&cpuctx->hlist_mutex);
-       if (cpuctx->hlist_refcount > 0) {
+       mutex_lock(&swhash->hlist_mutex);
+       if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
 
-               hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
-               WARN_ON_ONCE(!hlist);
-               rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+               hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
+               WARN_ON(!hlist);
+               rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_unlock(&swhash->hlist_mutex);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void __perf_event_exit_cpu(void *info)
+static void perf_pmu_rotate_stop(struct pmu *pmu)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+       WARN_ON(!irqs_disabled());
+
+       list_del_init(&cpuctx->rotation_list);
+}
+
+static void __perf_event_exit_context(void *__info)
+{
+       struct perf_event_context *ctx = __info;
        struct perf_event *event, *tmp;
 
+       perf_pmu_rotate_stop(ctx->pmu);
+
        list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
                __perf_event_remove_from_context(event);
        list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
                __perf_event_remove_from_context(event);
 }
+
+static void perf_event_exit_cpu_context(int cpu)
+{
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int idx;
+
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+
+               mutex_lock(&ctx->mutex);
+               smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+               mutex_unlock(&ctx->mutex);
+       }
+       srcu_read_unlock(&pmus_srcu, idx);
+}
+
 static void perf_event_exit_cpu(int cpu)
 {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
-       mutex_lock(&cpuctx->hlist_mutex);
-       swevent_hlist_release(cpuctx);
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
+       swevent_hlist_release(swhash);
+       mutex_unlock(&swhash->hlist_mutex);
 
-       mutex_lock(&ctx->mutex);
-       smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
-       mutex_unlock(&ctx->mutex);
+       perf_event_exit_cpu_context(cpu);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
@@ -5785,118 +6351,13 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
 
-/*
- * This has to have a higher priority than migration_notifier in sched.c.
- */
-static struct notifier_block __cpuinitdata perf_cpu_nb = {
-       .notifier_call          = perf_cpu_notify,
-       .priority               = 20,
-};
-
 void __init perf_event_init(void)
 {
        perf_event_init_all_cpus();
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
-                       (void *)(long)smp_processor_id());
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
-                       (void *)(long)smp_processor_id());
-       register_cpu_notifier(&perf_cpu_nb);
-}
-
-static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
-                                       struct sysdev_class_attribute *attr,
-                                       char *buf)
-{
-       return sprintf(buf, "%d\n", perf_reserved_percpu);
-}
-
-static ssize_t
-perf_set_reserve_percpu(struct sysdev_class *class,
-                       struct sysdev_class_attribute *attr,
-                       const char *buf,
-                       size_t count)
-{
-       struct perf_cpu_context *cpuctx;
-       unsigned long val;
-       int err, cpu, mpt;
-
-       err = strict_strtoul(buf, 10, &val);
-       if (err)
-               return err;
-       if (val > perf_max_events)
-               return -EINVAL;
-
-       spin_lock(&perf_resource_lock);
-       perf_reserved_percpu = val;
-       for_each_online_cpu(cpu) {
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               raw_spin_lock_irq(&cpuctx->ctx.lock);
-               mpt = min(perf_max_events - cpuctx->ctx.nr_events,
-                         perf_max_events - perf_reserved_percpu);
-               cpuctx->max_pertask = mpt;
-               raw_spin_unlock_irq(&cpuctx->ctx.lock);
-       }
-       spin_unlock(&perf_resource_lock);
-
-       return count;
-}
-
-static ssize_t perf_show_overcommit(struct sysdev_class *class,
-                                   struct sysdev_class_attribute *attr,
-                                   char *buf)
-{
-       return sprintf(buf, "%d\n", perf_overcommit);
-}
-
-static ssize_t
-perf_set_overcommit(struct sysdev_class *class,
-                   struct sysdev_class_attribute *attr,
-                   const char *buf, size_t count)
-{
-       unsigned long val;
-       int err;
-
-       err = strict_strtoul(buf, 10, &val);
-       if (err)
-               return err;
-       if (val > 1)
-               return -EINVAL;
-
-       spin_lock(&perf_resource_lock);
-       perf_overcommit = val;
-       spin_unlock(&perf_resource_lock);
-
-       return count;
-}
-
-static SYSDEV_CLASS_ATTR(
-                               reserve_percpu,
-                               0644,
-                               perf_show_reserve_percpu,
-                               perf_set_reserve_percpu
-                       );
-
-static SYSDEV_CLASS_ATTR(
-                               overcommit,
-                               0644,
-                               perf_show_overcommit,
-                               perf_set_overcommit
-                       );
-
-static struct attribute *perfclass_attrs[] = {
-       &attr_reserve_percpu.attr,
-       &attr_overcommit.attr,
-       NULL
-};
-
-static struct attribute_group perfclass_attr_group = {
-       .attrs                  = perfclass_attrs,
-       .name                   = "perf_events",
-};
-
-static int __init perf_event_sysfs_init(void)
-{
-       return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
-                                 &perfclass_attr_group);
+       init_srcu_struct(&pmus_srcu);
+       perf_pmu_register(&perf_swevent);
+       perf_pmu_register(&perf_cpu_clock);
+       perf_pmu_register(&perf_task_clock);
+       perf_tp_register();
+       perf_cpu_notifier(perf_cpu_notify);
 }
-device_initcall(perf_event_sysfs_init);
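
Taken together, the perf_event.c changes above replace the fixed switch over event types with a registry: struct pmu gains ->event_init/->add/->del/->start/->stop/->read plus a task_ctx_nr, and perf_pmu_register() sets up the per-cpu contexts and fills in nop/transaction stubs for callbacks a PMU leaves unset. A minimal registration sketch; the mypmu_* callbacks are placeholders assumed to have the signatures used above:

    /* Sketch of registering a software-style PMU against the new interface.
     * Because neither ->start_txn nor ->pmu_enable is provided here,
     * perf_pmu_register() installs the nop/txn stubs shown above. */
    static struct pmu mypmu = {
            .task_ctx_nr    = perf_sw_context,

            .event_init     = mypmu_event_init,
            .add            = mypmu_add,    /* must honour PERF_EF_START */
            .del            = mypmu_del,
            .start          = mypmu_start,
            .stop           = mypmu_stop,
            .read           = mypmu_read,
    };

    static int __init mypmu_init(void)
    {
            return perf_pmu_register(&mypmu);
    }
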
index dc85ceb908322cad7196339f4df8dd58c37b1cec..c0d2067f3e0d81bb45c3372bb39358164612f3a6 100644 (file)
@@ -3584,7 +3584,7 @@ void scheduler_tick(void)
        curr->sched_class->task_tick(rq, curr, 0);
        raw_spin_unlock(&rq->lock);
 
-       perf_event_task_tick(curr);
+       perf_event_task_tick();
 
 #ifdef CONFIG_SMP
        rq->idle_at_tick = idle_cpu(cpu);
index 75c970c715d399f1385d72e92e0c47c509e568dc..ed6aacfcb7efb307fe313ea798e7074f2c8f4f92 100644 (file)
@@ -365,9 +365,10 @@ call:
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
 /**
- * __smp_call_function_single(): Run a function on another CPU
+ * __smp_call_function_single(): Run a function on a specific CPU
  * @cpu: The CPU to run on.
  * @data: Pre-allocated and setup data structure
+ * @wait: If true, wait until function has completed on specified CPU.
  *
  * Like smp_call_function_single(), but allow caller to pass in a
  * pre-allocated data structure. Useful for embedding @data inside
@@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
 void __smp_call_function_single(int cpu, struct call_single_data *data,
                                int wait)
 {
-       csd_lock(data);
+       unsigned int this_cpu;
+       unsigned long flags;
 
+       this_cpu = get_cpu();
        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
@@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
        WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
                     && !oops_in_progress);
 
-       generic_exec_single(cpu, data, wait);
+       if (cpu == this_cpu) {
+               local_irq_save(flags);
+               data->func(data->info);
+               local_irq_restore(flags);
+       } else {
+               csd_lock(data);
+               generic_exec_single(cpu, data, wait);
+       }
+       put_cpu();
 }
 
 /**
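
The smp.c change above lets __smp_call_function_single() be called with the current CPU as the target: the callback then runs inline under local_irq_save() instead of being queued through generic_exec_single(). A hedged usage sketch; my_func, my_csd and kick_cpu are invented names:

    static void my_func(void *info)
    {
            /* runs on the target CPU, possibly inline on the caller */
    }

    static struct call_single_data my_csd = {
            .func = my_func,
            .info = NULL,
    };

    static void kick_cpu(int cpu)
    {
            /* With the change above, cpu == smp_processor_id() is handled by
             * calling my_func() directly with interrupts disabled. */
            __smp_call_function_single(cpu, &my_csd, 0);
    }
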
index 4f104515a19bcb18ca73226c745786a41d41b4ae..f8b11a283171b65849c5ed3bc0e162397f76fbea 100644 (file)
@@ -115,7 +115,9 @@ static int test_kprobes(void)
        int ret;
        struct kprobe *kps[2] = {&kp, &kp2};
 
-       kp.addr = 0; /* addr should be cleard for reusing kprobe. */
+       /* addr and flags should be cleared for reusing kprobe. */
+       kp.addr = NULL;
+       kp.flags = 0;
        ret = register_kprobes(kps, 2);
        if (ret < 0) {
                printk(KERN_ERR "Kprobe smoke test failed: "
@@ -210,7 +212,9 @@ static int test_jprobes(void)
        int ret;
        struct jprobe *jps[2] = {&jp, &jp2};
 
-       jp.kp.addr = 0; /* addr should be cleard for reusing kprobe. */
+       /* addr and flags should be cleared for reusing kprobe. */
+       jp.kp.addr = NULL;
+       jp.kp.flags = 0;
        ret = register_jprobes(jps, 2);
        if (ret < 0) {
                printk(KERN_ERR "Kprobe smoke test failed: "
@@ -323,7 +327,9 @@ static int test_kretprobes(void)
        int ret;
        struct kretprobe *rps[2] = {&rp, &rp2};
 
-       rp.kp.addr = 0; /* addr should be cleard for reusing kprobe. */
+       /* addr and flags should be cleared for reusing kprobe. */
+       rp.kp.addr = NULL;
+       rp.kp.flags = 0;
        ret = register_kretprobes(rps, 2);
        if (ret < 0) {
                printk(KERN_ERR "Kprobe smoke test failed: "
index 538501c6ea5058cf703eaa2608307f03f3aee89a..e550d2eda1dfdbf843e60c170d1fff3ec4e4eaf9 100644 (file)
@@ -49,6 +49,11 @@ config HAVE_SYSCALL_TRACEPOINTS
        help
          See Documentation/trace/ftrace-design.txt
 
+config HAVE_C_RECORDMCOUNT
+       bool
+       help
+         C version of recordmcount available?
+
 config TRACER_MAX_TRACE
        bool
 
index fa7ece649fe1bcad7b0db621fa8579e91860fb04..65fb077ea79c147c21d6a8a4890a2ec6d17712bc 100644 (file)
@@ -884,10 +884,8 @@ enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
-       FTRACE_ENABLE_MCOUNT            = (1 << 3),
-       FTRACE_DISABLE_MCOUNT           = (1 << 4),
-       FTRACE_START_FUNC_RET           = (1 << 5),
-       FTRACE_STOP_FUNC_RET            = (1 << 6),
+       FTRACE_START_FUNC_RET           = (1 << 3),
+       FTRACE_STOP_FUNC_RET            = (1 << 4),
 };
 
 static int ftrace_filtered;
@@ -1226,8 +1224,6 @@ static void ftrace_shutdown(int command)
 
 static void ftrace_startup_sysctl(void)
 {
-       int command = FTRACE_ENABLE_MCOUNT;
-
        if (unlikely(ftrace_disabled))
                return;
 
@@ -1235,23 +1231,17 @@ static void ftrace_startup_sysctl(void)
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
-               command |= FTRACE_ENABLE_CALLS;
-
-       ftrace_run_update_code(command);
+               ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
-       int command = FTRACE_DISABLE_MCOUNT;
-
        if (unlikely(ftrace_disabled))
                return;
 
        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
-               command |= FTRACE_DISABLE_CALLS;
-
-       ftrace_run_update_code(command);
+               ftrace_run_update_code(FTRACE_DISABLE_CALLS);
 }
 
 static cycle_t         ftrace_update_time;
@@ -1368,24 +1358,29 @@ enum {
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
-       struct ftrace_page      *pg;
-       int                     hidx;
-       int                     idx;
-       unsigned                flags;
-       struct trace_parser     parser;
+       loff_t                          pos;
+       loff_t                          func_pos;
+       struct ftrace_page              *pg;
+       struct dyn_ftrace               *func;
+       struct ftrace_func_probe        *probe;
+       struct trace_parser             parser;
+       int                             hidx;
+       int                             idx;
+       unsigned                        flags;
 };
 
 static void *
-t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+t_hash_next(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
-       struct hlist_node *hnd = v;
+       struct hlist_node *hnd = NULL;
        struct hlist_head *hhd;
 
-       WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
-
        (*pos)++;
+       iter->pos = *pos;
 
+       if (iter->probe)
+               hnd = &iter->probe->node;
  retry:
        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
                return NULL;
@@ -1408,7 +1403,12 @@ t_hash_next(struct seq_file *m, void *v, loff_t *pos)
                }
        }
 
-       return hnd;
+       if (WARN_ON_ONCE(!hnd))
+               return NULL;
+
+       iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
+
+       return iter;
 }
 
 static void *t_hash_start(struct seq_file *m, loff_t *pos)
@@ -1417,26 +1417,32 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
        void *p = NULL;
        loff_t l;
 
-       if (!(iter->flags & FTRACE_ITER_HASH))
-               *pos = 0;
-
-       iter->flags |= FTRACE_ITER_HASH;
+       if (iter->func_pos > *pos)
+               return NULL;
 
        iter->hidx = 0;
-       for (l = 0; l <= *pos; ) {
-               p = t_hash_next(m, p, &l);
+       for (l = 0; l <= (*pos - iter->func_pos); ) {
+               p = t_hash_next(m, &l);
                if (!p)
                        break;
        }
-       return p;
+       if (!p)
+               return NULL;
+
+       /* Only set this if we have an item */
+       iter->flags |= FTRACE_ITER_HASH;
+
+       return iter;
 }
 
-static int t_hash_show(struct seq_file *m, void *v)
+static int
+t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
 {
        struct ftrace_func_probe *rec;
-       struct hlist_node *hnd = v;
 
-       rec = hlist_entry(hnd, struct ftrace_func_probe, node);
+       rec = iter->probe;
+       if (WARN_ON_ONCE(!rec))
+               return -EIO;
 
        if (rec->ops->print)
                return rec->ops->print(m, rec->ip, rec->ops, rec->data);
@@ -1457,12 +1463,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
        struct dyn_ftrace *rec = NULL;
 
        if (iter->flags & FTRACE_ITER_HASH)
-               return t_hash_next(m, v, pos);
+               return t_hash_next(m, pos);
 
        (*pos)++;
+       iter->pos = *pos;
 
        if (iter->flags & FTRACE_ITER_PRINTALL)
-               return NULL;
+               return t_hash_start(m, pos);
 
  retry:
        if (iter->idx >= iter->pg->index) {
@@ -1491,7 +1498,20 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                }
        }
 
-       return rec;
+       if (!rec)
+               return t_hash_start(m, pos);
+
+       iter->func_pos = *pos;
+       iter->func = rec;
+
+       return iter;
+}
+
+static void reset_iter_read(struct ftrace_iterator *iter)
+{
+       iter->pos = 0;
+       iter->func_pos = 0;
+       iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
@@ -1501,6 +1521,12 @@ static void *t_start(struct seq_file *m, loff_t *pos)
        loff_t l;
 
        mutex_lock(&ftrace_lock);
+       /*
+        * If an lseek was done, then reset and start from beginning.
+        */
+       if (*pos < iter->pos)
+               reset_iter_read(iter);
+
        /*
         * For set_ftrace_filter reading, if we have the filter
         * off, we can short cut and just print out that all
@@ -1518,6 +1544,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_start(m, pos);
 
+       /*
+        * Unfortunately, we need to restart at ftrace_pages_start
+        * every time we let go of the ftrace_mutex. This is because
+        * those pointers can change without the lock.
+        */
        iter->pg = ftrace_pages_start;
        iter->idx = 0;
        for (l = 0; l <= *pos; ) {
@@ -1526,10 +1557,14 @@ static void *t_start(struct seq_file *m, loff_t *pos)
                        break;
        }
 
-       if (!p && iter->flags & FTRACE_ITER_FILTER)
-               return t_hash_start(m, pos);
+       if (!p) {
+               if (iter->flags & FTRACE_ITER_FILTER)
+                       return t_hash_start(m, pos);
 
-       return p;
+               return NULL;
+       }
+
+       return iter;
 }
 
 static void t_stop(struct seq_file *m, void *p)
@@ -1540,16 +1575,18 @@ static void t_stop(struct seq_file *m, void *p)
 static int t_show(struct seq_file *m, void *v)
 {
        struct ftrace_iterator *iter = m->private;
-       struct dyn_ftrace *rec = v;
+       struct dyn_ftrace *rec;
 
        if (iter->flags & FTRACE_ITER_HASH)
-               return t_hash_show(m, v);
+               return t_hash_show(m, iter);
 
        if (iter->flags & FTRACE_ITER_PRINTALL) {
                seq_printf(m, "#### all functions enabled ####\n");
                return 0;
        }
 
+       rec = iter->func;
+
        if (!rec)
                return 0;
 
@@ -2418,7 +2455,7 @@ static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = no_llseek,
+       .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
 };
 
index 492197e2f86cda2792603186b59ad3fdd17c448d..4e2f03410377af2ee6f0fb46a7102e1ae76eba0c 100644 (file)
@@ -2606,6 +2606,19 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
+/*
+ * The total entries in the ring buffer is the running counter
+ * of entries entered into the ring buffer, minus the sum of
+ * the entries read from the ring buffer and the number of
+ * entries that were overwritten.
+ */
+static inline unsigned long
+rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
+{
+       return local_read(&cpu_buffer->entries) -
+               (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
+}
+
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  * @buffer: The ring buffer
@@ -2614,16 +2627,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       unsigned long ret;
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return 0;
 
        cpu_buffer = buffer->buffers[cpu];
-       ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
-               - cpu_buffer->read;
 
-       return ret;
+       return rb_num_of_entries(cpu_buffer);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
@@ -2684,8 +2694,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
        /* if you care about this being correct, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
-               entries += (local_read(&cpu_buffer->entries) -
-                           local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
+               entries += rb_num_of_entries(cpu_buffer);
        }
 
        return entries;
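
rb_num_of_entries() centralizes the accounting both callers previously open-coded: entries still readable = entries written - (entries overwritten + entries already read). A stand-alone illustration with invented numbers:

    #include <stdio.h>

    /* Same arithmetic as rb_num_of_entries(); sample values are made up. */
    int main(void)
    {
            unsigned long written = 1500, overrun = 200, read = 300;

            printf("%lu entries remain\n", written - (overrun + read)); /* prints 1000 */
            return 0;
    }
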
index 31cc4cb0dbf2afaa49d5f7428f5cce31d535211d..39c059ca670e64156e6681782ffa708c6b8d720f 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-static char *perf_trace_buf[4];
+static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -24,7 +24,7 @@ static int    total_ref_count;
 static int perf_trace_event_init(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
 {
-       struct hlist_head *list;
+       struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;
 
@@ -42,11 +42,11 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
        tp_event->perf_events = list;
 
        if (!total_ref_count) {
-               char *buf;
+               char __percpu *buf;
                int i;
 
-               for (i = 0; i < 4; i++) {
-                       buf = (char *)alloc_percpu(perf_trace_t);
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
+                       buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;
 
@@ -65,7 +65,7 @@ fail:
        if (!total_ref_count) {
                int i;
 
-               for (i = 0; i < 4; i++) {
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
@@ -101,22 +101,26 @@ int perf_trace_init(struct perf_event *p_event)
        return ret;
 }
 
-int perf_trace_enable(struct perf_event *p_event)
+int perf_trace_add(struct perf_event *p_event, int flags)
 {
        struct ftrace_event_call *tp_event = p_event->tp_event;
+       struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;
 
-       list = tp_event->perf_events;
-       if (WARN_ON_ONCE(!list))
+       pcpu_list = tp_event->perf_events;
+       if (WARN_ON_ONCE(!pcpu_list))
                return -EINVAL;
 
-       list = this_cpu_ptr(list);
+       if (!(flags & PERF_EF_START))
+               p_event->hw.state = PERF_HES_STOPPED;
+
+       list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);
 
        return 0;
 }
 
-void perf_trace_disable(struct perf_event *p_event)
+void perf_trace_del(struct perf_event *p_event, int flags)
 {
        hlist_del_rcu(&p_event->hlist_entry);
 }
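For reference, the flags convention these callbacks follow (it comes from the struct pmu add/del/start/stop split merged elsewhere in this series; summarized here, not quoted from any one file):

    /*
     * pmu->add(event, PERF_EF_START)  - schedule the event in and start it now
     * pmu->add(event, 0)              - schedule it in, but leave
     *                                   hw.state = PERF_HES_STOPPED until a
     *                                   later pmu->start()
     * pmu->del(event, 0)              - unschedule the event
     */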
@@ -142,7 +146,7 @@ void perf_trace_destroy(struct perf_event *p_event)
        tp_event->perf_events = NULL;
 
        if (!--total_ref_count) {
-               for (i = 0; i < 4; i++) {
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
index 4c758f146328f18ce82a318fb60a0413006aca8f..398c0e8b332c1840e16bc0599c1230f9594d29a2 100644 (file)
@@ -600,21 +600,29 @@ out:
 
 enum {
        FORMAT_HEADER           = 1,
-       FORMAT_PRINTFMT         = 2,
+       FORMAT_FIELD_SEPERATOR  = 2,
+       FORMAT_PRINTFMT         = 3,
 };
 
 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
        struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
-       struct list_head *head;
+       struct list_head *common_head = &ftrace_common_fields;
+       struct list_head *head = trace_get_fields(call);
 
        (*pos)++;
 
        switch ((unsigned long)v) {
        case FORMAT_HEADER:
-               head = &ftrace_common_fields;
+               if (unlikely(list_empty(common_head)))
+                       return NULL;
+
+               field = list_entry(common_head->prev,
+                                  struct ftrace_event_field, link);
+               return field;
 
+       case FORMAT_FIELD_SEPERATOR:
                if (unlikely(list_empty(head)))
                        return NULL;
 
@@ -626,31 +634,10 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
                return NULL;
        }
 
-       head = trace_get_fields(call);
-
-       /*
-        * To separate common fields from event fields, the
-        * LSB is set on the first event field. Clear it in case.
-        */
-       v = (void *)((unsigned long)v & ~1L);
-
        field = v;
-       /*
-        * If this is a common field, and at the end of the list, then
-        * continue with main list.
-        */
-       if (field->link.prev == &ftrace_common_fields) {
-               if (unlikely(list_empty(head)))
-                       return NULL;
-               field = list_entry(head->prev, struct ftrace_event_field, link);
-               /* Set the LSB to notify f_show to print an extra newline */
-               field = (struct ftrace_event_field *)
-                       ((unsigned long)field | 1);
-               return field;
-       }
-
-       /* If we are done tell f_show to print the format */
-       if (field->link.prev == head)
+       if (field->link.prev == common_head)
+               return (void *)FORMAT_FIELD_SEPERATOR;
+       else if (field->link.prev == head)
                return (void *)FORMAT_PRINTFMT;
 
        field = list_entry(field->link.prev, struct ftrace_event_field, link);
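The resulting iteration order for an event's "format" file is, roughly:

    /*
     * FORMAT_HEADER          -> the "name:/ID:/format:" header block
     * <common fields>        -> one "field:..." line per ftrace_common_fields entry
     * FORMAT_FIELD_SEPERATOR -> a single blank line
     * <event fields>         -> one "field:..." line per event-specific field
     * FORMAT_PRINTFMT        -> the trailing "print fmt: ..." line
     */

which replaces the old trick of tagging the first event field pointer's LSB.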
@@ -688,22 +675,16 @@ static int f_show(struct seq_file *m, void *v)
                seq_printf(m, "format:\n");
                return 0;
 
+       case FORMAT_FIELD_SEPERATOR:
+               seq_putc(m, '\n');
+               return 0;
+
        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }
 
-       /*
-        * To separate common fields from event fields, the
-        * LSB is set on the first event field. Clear it and
-        * print a newline if it is set.
-        */
-       if ((unsigned long)v & 1) {
-               seq_putc(m, '\n');
-               v = (void *)((unsigned long)v & ~1L);
-       }
-
        field = v;
 
        /*
index 6f233698518ede15cc9302e889de9f108aa0f1cb..ef49e9370b25e8b525edbc150c9350010dd19cd6 100644 (file)
 #include "trace.h"
 #include "trace_output.h"
 
+/* When set, irq functions will be ignored */
+static int ftrace_graph_skip_irqs;
+
 struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
+       int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
 };
 
 struct fgraph_data {
-       struct fgraph_cpu_data          *cpu_data;
+       struct fgraph_cpu_data __percpu *cpu_data;
 
        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
@@ -41,6 +45,7 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_PROC         0x8
 #define TRACE_GRAPH_PRINT_DURATION     0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME     0x20
+#define TRACE_GRAPH_PRINT_IRQS         0x40
 
 static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
@@ -55,13 +60,15 @@ static struct tracer_opt trace_opts[] = {
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
+       /* Display interrupts */
+       { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        { } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
-              TRACE_GRAPH_PRINT_DURATION,
+              TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
        .opts = trace_opts
 };
 
@@ -204,6 +211,14 @@ int __trace_graph_entry(struct trace_array *tr,
        return 1;
 }
 
+static inline int ftrace_graph_ignore_irqs(void)
+{
+       if (!ftrace_graph_skip_irqs)
+               return 0;
+
+       return in_irq();
+}
+
 int trace_graph_entry(struct ftrace_graph_ent *trace)
 {
        struct trace_array *tr = graph_array;
@@ -218,7 +233,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
                return 0;
 
        /* trace it when it is-nested-in or is a function enabled. */
-       if (!(trace->depth || ftrace_graph_addr(trace->func)))
+       if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
+             ftrace_graph_ignore_irqs())
                return 0;
 
        local_irq_save(flags);
@@ -649,8 +665,9 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 
        /* Print nsecs (we don't want to exceed 7 numbers) */
        if (len < 7) {
-               snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu",
-                        nsecs_rem);
+               size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
+
+               snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
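The switch from min() to min_t() matters because the kernel's min() is type-strict: sizeof() yields a size_t while 8UL - len is an unsigned long, and on architectures where those types differ the mismatch triggers a build warning. A minimal sketch of the pattern outside the kernel (illustrative only, hypothetical helper name):

    #include <stddef.h>

    /* cast both operands to one explicit type before comparing,
     * which is what min_t(size_t, ...) does */
    static size_t bounded(size_t buf_size, unsigned long budget)
    {
            return buf_size < (size_t)budget ? buf_size : (size_t)budget;
    }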
@@ -855,6 +872,92 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
        return 0;
 }
 
+/*
+ * Entry check for irq code
+ *
+ * returns 1 if
+ *  - we are inside irq code
+ *  - we just entered irq code
+ *
+ * returns 0 if
+ *  - funcgraph-irqs option is set
+ *  - we are not inside irq code
+ */
+static int
+check_irq_entry(struct trace_iterator *iter, u32 flags,
+               unsigned long addr, int depth)
+{
+       int cpu = iter->cpu;
+       struct fgraph_data *data = iter->private;
+       int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+       if (flags & TRACE_GRAPH_PRINT_IRQS)
+               return 0;
+
+       /*
+        * We are inside the irq code
+        */
+       if (*depth_irq >= 0)
+               return 1;
+
+       if ((addr < (unsigned long)__irqentry_text_start) ||
+           (addr >= (unsigned long)__irqentry_text_end))
+               return 0;
+
+       /*
+        * We are entering irq code.
+        */
+       *depth_irq = depth;
+       return 1;
+}
+
+/*
+ * Return check for irq code
+ *
+ * returns 1 if
+ *  - we are inside irq code
+ *  - we just left irq code
+ *
+ * returns 0 if
+ *  - funcgraph-irqs option is set
+ *  - we are not inside irq code
+ */
+static int
+check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
+{
+       int cpu = iter->cpu;
+       struct fgraph_data *data = iter->private;
+       int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+       if (flags & TRACE_GRAPH_PRINT_IRQS)
+               return 0;
+
+       /*
+        * We are not inside the irq code.
+        */
+       if (*depth_irq == -1)
+               return 0;
+
+       /*
+        * We are inside the irq code, and this is the return of the
+        * entry function.  Don't trace it, and clear the entry depth,
+        * since we are now leaving the irq code.
+        *
+        * Checking '>=' rather than '==' ensures that we 'leave the
+        * irq code' once we return at or shallower than the recorded
+        * entry depth, even if the RETURN entry at that exact depth
+        * was lost.
+        */
+       if (*depth_irq >= depth) {
+               *depth_irq = -1;
+               return 1;
+       }
+
+       /*
+        * We are inside the irq code, and this is not the entry.
+        */
+       return 1;
+}
+
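The per-cpu depth_irq field added above works as a small state machine; a summary sketch (wording mine, not from the patch):

    /*
     * depth_irq == -1 : outside irq code, entries/returns print normally
     * depth_irq >=  0 : inside irq code; holds the depth of the first function
     *                   that fell within __irqentry_text_start..__irqentry_text_end,
     *                   and everything is suppressed until a return at or above
     *                   that depth resets the field back to -1
     */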
 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter, u32 flags)
@@ -865,6 +968,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
        static enum print_line_t ret;
        int cpu = iter->cpu;
 
+       if (check_irq_entry(iter, flags, call->func, call->depth))
+               return TRACE_TYPE_HANDLED;
+
        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
                return TRACE_TYPE_PARTIAL_LINE;
 
@@ -902,6 +1008,9 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
        int ret;
        int i;
 
+       if (check_irq_return(iter, flags, trace->depth))
+               return TRACE_TYPE_HANDLED;
+
        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;
@@ -1210,9 +1319,12 @@ void graph_trace_open(struct trace_iterator *iter)
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+               int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
                *pid = -1;
                *depth = 0;
                *ignore = 0;
+               *depth_irq = -1;
        }
 
        iter->private = data;
@@ -1235,6 +1347,14 @@ void graph_trace_close(struct trace_iterator *iter)
        }
 }
 
+static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
+{
+       if (bit == TRACE_GRAPH_PRINT_IRQS)
+               ftrace_graph_skip_irqs = !set;
+
+       return 0;
+}
+
 static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
 };
@@ -1261,6 +1381,7 @@ static struct tracer graph_trace __read_mostly = {
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
+       .set_flag       = func_graph_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
 #endif
index a7cc3793baf6897d2535737a52322552040d2737..209b379a47210dad4170e5914e5ecc68207f8784 100644 (file)
@@ -263,6 +263,11 @@ int __init trace_workqueue_early_init(void)
 {
        int ret, cpu;
 
+       for_each_possible_cpu(cpu) {
+               spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+               INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
+       }
+
        ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
        if (ret)
                goto out;
@@ -279,11 +284,6 @@ int __init trace_workqueue_early_init(void)
        if (ret)
                goto no_creation;
 
-       for_each_possible_cpu(cpu) {
-               spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
-               INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
-       }
-
        return 0;
 
 no_creation:
index c77f3eceea250e49b0ff001959f8df9eeb8e0716..d6073a50a6cad15b9369132c49815655f4ee0661 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/jump_label.h>
 
 extern struct tracepoint __start___tracepoints[];
 extern struct tracepoint __stop___tracepoints[];
@@ -263,7 +264,13 @@ static void set_tracepoint(struct tracepoint_entry **entry,
         * is used.
         */
        rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-       elem->state = active;
+       if (!elem->state && active) {
+               enable_jump_label(&elem->state);
+               elem->state = active;
+       } else if (elem->state && !active) {
+               disable_jump_label(&elem->state);
+               elem->state = active;
+       }
 }
 
 /*
@@ -277,7 +284,10 @@ static void disable_tracepoint(struct tracepoint *elem)
        if (elem->unregfunc && elem->state)
                elem->unregfunc();
 
-       elem->state = 0;
+       if (elem->state) {
+               disable_jump_label(&elem->state);
+               elem->state = 0;
+       }
        rcu_assign_pointer(elem->funcs, NULL);
 }
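Roughly how the consumer side is expected to use this (simplified from the jump-label form of DECLARE_TRACE introduced by this series; details vary by architecture, so treat this as a sketch):

    /*
     * static inline void trace_foo(proto)
     * {
     *         JUMP_LABEL(&__tracepoint_foo.state, do_trace);  // patched nop
     *         return;
     * do_trace:
     *         __DO_TRACE(&__tracepoint_foo, ...);
     * }
     *
     * enable_jump_label(&elem->state) turns the nop into a jump to do_trace;
     * disable_jump_label() patches it back, so a disabled tracepoint costs a
     * straight-line nop instead of a load and test of elem->state.
     */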
 
index 7f9c3c52ecc12ef5d0de1728839218ff87c2dc7b..dc8e16824b51b18c8c113f0d670d70cb58ede740 100644 (file)
@@ -43,7 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int __read_mostly did_panic;
 static int __initdata no_watchdog;
 
 
@@ -187,18 +186,6 @@ static int is_softlockup(unsigned long touch_ts)
        return 0;
 }
 
-static int
-watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
-{
-       did_panic = 1;
-
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block panic_block = {
-       .notifier_call = watchdog_panic,
-};
-
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
@@ -371,14 +358,14 @@ static int watchdog_nmi_enable(int cpu)
        /* Try to register using hardware perf events */
        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period();
-       event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
+       event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
        if (!IS_ERR(event)) {
                printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
                goto out_save;
        }
 
        printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
-       return -1;
+       return PTR_ERR(event);
 
        /* success path */
 out_save:
@@ -422,17 +409,19 @@ static int watchdog_prepare_cpu(int cpu)
 static int watchdog_enable(int cpu)
 {
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
+       int err;
 
        /* enable the perf event */
-       if (watchdog_nmi_enable(cpu) != 0)
-               return -1;
+       err = watchdog_nmi_enable(cpu);
+       if (err)
+               return err;
 
        /* create the watchdog thread */
        if (!p) {
                p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
-                       return -1;
+                       return PTR_ERR(p);
                }
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
@@ -484,6 +473,9 @@ static void watchdog_disable_all_cpus(void)
 {
        int cpu;
 
+       if (no_watchdog)
+               return;
+
        for_each_online_cpu(cpu)
                watchdog_disable(cpu);
 
@@ -526,17 +518,16 @@ static int __cpuinit
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
        int hotcpu = (unsigned long)hcpu;
+       int err = 0;
 
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               if (watchdog_prepare_cpu(hotcpu))
-                       return NOTIFY_BAD;
+               err = watchdog_prepare_cpu(hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-               if (watchdog_enable(hotcpu))
-                       return NOTIFY_BAD;
+               err = watchdog_enable(hotcpu);
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
@@ -549,7 +540,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
                break;
 #endif /* CONFIG_HOTPLUG_CPU */
        }
-       return NOTIFY_OK;
+       return notifier_from_errno(err);
 }
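notifier_from_errno() lets the callback hand back a real errno instead of a bare NOTIFY_BAD: zero maps to NOTIFY_OK, and a negative errno is encoded so the caller can recover it with notifier_to_errno(), as the WARN_ON() further down now does. Approximately (paraphrasing the existing helpers in include/linux/notifier.h, shown only for context):

    static inline int notifier_from_errno(int err)
    {
            if (err)
                    return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
            return NOTIFY_OK;
    }

    static inline int notifier_to_errno(int ret)
    {
            ret &= ~NOTIFY_STOP_MASK;
            return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
    }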
 
 static struct notifier_block __cpuinitdata cpu_nfb = {
@@ -565,13 +556,11 @@ static int __init spawn_watchdog_task(void)
                return 0;
 
        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
-       WARN_ON(err == NOTIFY_BAD);
+       WARN_ON(notifier_to_errno(err));
 
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
 
-       atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
-
        return 0;
 }
 early_initcall(spawn_watchdog_task);
index 1b4afd2e6ca089de0babdacc5781426ef118da5c..e85d549b6eac20eb040ba4b27d6f4d334e0b5106 100644 (file)
@@ -482,6 +482,7 @@ config PROVE_LOCKING
        select DEBUG_SPINLOCK
        select DEBUG_MUTEXES
        select DEBUG_LOCK_ALLOC
+       select TRACE_IRQFLAGS
        default n
        help
         This feature enables the kernel to prove that all locking
@@ -579,11 +580,10 @@ config DEBUG_LOCKDEP
          of more runtime overhead.
 
 config TRACE_IRQFLAGS
-       depends on DEBUG_KERNEL
        bool
-       default y
-       depends on TRACE_IRQFLAGS_SUPPORT
-       depends on PROVE_LOCKING
+       help
+         Enables hooks to interrupt enabling and disabling for
+         either tracing or lock debugging.
 
 config DEBUG_SPINLOCK_SLEEP
        bool "Spinlock debugging: sleep-inside-spinlock checking"
index 7cdfad88128fa5d3d3076d552fbde276ef500d66..19552096d16b06bd2dac2a9b10212e482a4d0da3 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
        return NULL;
 }
 
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-                       struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+                        struct module *mod)
 {
        char *secstrings;
        unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
         * could potentially lead to deadlock and thus be counter-productive.
         */
        list_add(&mod->bug_list, &module_bug_list);
-
-       return 0;
 }
 
 void module_bug_cleanup(struct module *mod)
index 02afc25337284329d23a998e636240d5a41d337d..e925c7b960f1ec4258392da3df0f5377d4ef870a 100644 (file)
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/jump_label.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
 
-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
 struct ddebug_table {
        struct list_head link;
        char *mod_name;
@@ -87,26 +79,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
        return buf;
 }
 
-/*
- * must be called with ddebug_lock held
- */
-
-static int disabled_hash(char hash, bool first_table)
-{
-       struct ddebug_table *dt;
-       char table_hash_value;
-
-       list_for_each_entry(dt, &ddebug_tables, link) {
-               if (first_table)
-                       table_hash_value = dt->ddebugs->primary_hash;
-               else
-                       table_hash_value = dt->ddebugs->secondary_hash;
-               if (dt->num_enabled && (hash == table_hash_value))
-                       return 0;
-       }
-       return 1;
-}
-
 /*
  * Search the tables for _ddebug's which match the given
  * `query' and apply the `flags' and `mask' to them.  Tells
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
                                dt->num_enabled++;
                        dp->flags = newflags;
                        if (newflags) {
-                               dynamic_debug_enabled |=
-                                               (1LL << dp->primary_hash);
-                               dynamic_debug_enabled2 |=
-                                               (1LL << dp->secondary_hash);
+                               enable_jump_label(&dp->enabled);
                        } else {
-                               if (disabled_hash(dp->primary_hash, true))
-                                       dynamic_debug_enabled &=
-                                               ~(1LL << dp->primary_hash);
-                               if (disabled_hash(dp->secondary_hash, false))
-                                       dynamic_debug_enabled2 &=
-                                               ~(1LL << dp->secondary_hash);
+                               disable_jump_label(&dp->enabled);
                        }
                        if (verbose)
                                printk(KERN_INFO
index 4b5cb794c38bb270b8b72b70a47de265ec05c210..a7616fa3162e844f5b3c7090c1911543c826147e 100644 (file)
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
                 * element comparison is needed, so the client's cmp()
                 * routine can invoke cond_resched() periodically.
                 */
-               (*cmp)(priv, tail, tail);
+               (*cmp)(priv, tail->next, tail->next);
 
                tail->next->prev = tail;
                tail = tail->next;
index 46f5dacf90a2cd62427fdf89b67fa01ef8af68e0..ec520c7b28dffedb5027de6b27834831938671cd 100644 (file)
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 {
        struct mm_struct *mm = current->mm;
        struct address_space *mapping;
-       unsigned long end = start + size;
        struct vm_area_struct *vma;
        int err = -EINVAL;
        int has_write_lock = 0;
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
        if (start + size <= start)
                return err;
 
+       /* Does pgoff wrap? */
+       if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+               return err;
+
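A concrete overflow case the new check catches (hypothetical numbers, 32-bit longs, 4 KiB pages):

    /*
     * pgoff               = 0xfffff000
     * size >> PAGE_SHIFT  = 0x4000          (a 64 MiB request)
     * pgoff + (size >> PAGE_SHIFT) wraps to 0x3000, which is < pgoff,
     * so the request is rejected before the PTE-representation test below.
     */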
        /* Can we represent this offset inside this architecture's pte's? */
 #if PTE_FILE_MAX_BITS < BITS_PER_LONG
        if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
        if (!(vma->vm_flags & VM_CAN_NONLINEAR))
                goto out;
 
-       if (end <= start || start < vma->vm_start || end > vma->vm_end)
+       if (start < vma->vm_start || start + size > vma->vm_end)
                goto out;
 
        /* Must set VM_NONLINEAR before any pages are populated. */
index cc5be788a39fe132c72cbc1d2fb1c03f71708575..c03273807182dde1d9dd2e905c0db11a6dfe2441 100644 (file)
@@ -2324,11 +2324,8 @@ retry_avoidcopy:
         * and just make the page writable */
        avoidcopy = (page_mapcount(old_page) == 1);
        if (avoidcopy) {
-               if (!trylock_page(old_page)) {
-                       if (PageAnon(old_page))
-                               page_move_anon_rmap(old_page, vma, address);
-               } else
-                       unlock_page(old_page);
+               if (PageAnon(old_page))
+                       page_move_anon_rmap(old_page, vma, address);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }
@@ -2404,7 +2401,7 @@ retry_avoidcopy:
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page);
-               hugepage_add_anon_rmap(new_page, vma, address);
+               hugepage_add_new_anon_rmap(new_page, vma, address);
                /* Make the old page be freed below */
                new_page = old_page;
                mmu_notifier_invalidate_range_end(mm,
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                                vma, address);
        }
 
-       if (!pagecache_page) {
-               page = pte_page(entry);
+       /*
+        * hugetlb_cow() requires page locks of pte_page(entry) and
+        * pagecache_page, so here we need to take the former one
+        * when page != pagecache_page or !pagecache_page.
+        * Note that locking order is always pagecache_page -> page,
+        * so no worry about deadlock.
+        */
+       page = pte_page(entry);
+       if (page != pagecache_page)
                lock_page(page);
-       }
 
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@ out_page_table_lock:
        if (pagecache_page) {
                unlock_page(pagecache_page);
                put_page(pagecache_page);
-       } else {
-               unlock_page(page);
        }
+       unlock_page(page);
 
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
index b1873cf03ed986bcb062259da8f1c4093a97160b..65ab5c7067d994ad934c4f4bd5fd5809235a0756 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -712,7 +712,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
        if (!ptep)
                goto out;
 
-       if (pte_write(*ptep)) {
+       if (pte_write(*ptep) || pte_dirty(*ptep)) {
                pte_t entry;
 
                swapped = PageSwapCache(page);
@@ -735,7 +735,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                        set_pte_at(mm, addr, ptep, entry);
                        goto out_unlock;
                }
-               entry = pte_wrprotect(entry);
+               if (pte_dirty(entry))
+                       set_page_dirty(page);
+               entry = pte_mkclean(pte_wrprotect(entry));
                set_pte_at_notify(mm, addr, ptep, entry);
        }
        *orig_pte = *ptep;
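A short rationale sketch (my summary, not from the patch description):

    /*
     * A pte can be write-protected and still dirty.  Before write-protecting
     * the page for merging, push that dirtiness to the struct page with
     * set_page_dirty() and clear it in the pte (pte_mkclean()); otherwise
     * reclaim could later treat the page as clean and drop it, losing the
     * data written through this mapping.
     */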
index 6128dc8e5ede709cada129438fbac101895aa09d..00161a48a45100c611ebaa053f0b1f1486d09f29 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2009,6 +2009,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                        removed_exe_file_vma(mm);
                fput(new->vm_file);
        }
+       unlink_anon_vmas(new);
  out_free_mpol:
        mpol_put(pol);
  out_free_vma:
index fc81cb22869ef54e6871daf39f51b32e3377aa98..4029583a10241aaa84e3937ee216740e0a88a363 100644 (file)
@@ -121,8 +121,8 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
 }
 
 /* return true if the task is not adequate as candidate victim task. */
-static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
-                          const nodemask_t *nodemask)
+static bool oom_unkillable_task(struct task_struct *p,
+               const struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
        if (is_global_init(p))
                return true;
@@ -208,8 +208,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
         */
        points += p->signal->oom_score_adj;
 
-       if (points < 0)
-               return 0;
+       /*
+        * Never return 0 for an eligible task that may be killed since it's
+        * possible that no single user task uses more than 0.1% of memory and
+        * no single admin task uses more than 3.0%.
+        */
+       if (points <= 0)
+               return 1;
        return (points < 1000) ? points : 1000;
 }
 
@@ -339,26 +344,24 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 /**
  * dump_tasks - dump current memory state of all system tasks
  * @mem: current's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
  *
- * Dumps the current memory state of all system tasks, excluding kernel threads.
+ * Dumps the current memory state of all eligible tasks.  Tasks not in the same
+ * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
+ * are not shown.
  * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
  * value, oom_score_adj value, and name.
  *
- * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are
- * shown.
- *
  * Call with tasklist_lock read-locked.
  */
-static void dump_tasks(const struct mem_cgroup *mem)
+static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
        struct task_struct *p;
        struct task_struct *task;
 
        pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
        for_each_process(p) {
-               if (p->flags & PF_KTHREAD)
-                       continue;
-               if (mem && !task_in_mem_cgroup(p, mem))
+               if (oom_unkillable_task(p, mem, nodemask))
                        continue;
 
                task = find_lock_task_mm(p);
@@ -381,7 +384,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
 }
 
 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
-                                                       struct mem_cgroup *mem)
+                       struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
        task_lock(current);
        pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -394,7 +397,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
        mem_cgroup_print_oom_info(mem, p);
        show_mem();
        if (sysctl_oom_dump_tasks)
-               dump_tasks(mem);
+               dump_tasks(mem, nodemask);
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -436,7 +439,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
        unsigned int victim_points = 0;
 
        if (printk_ratelimit())
-               dump_header(p, gfp_mask, order, mem);
+               dump_header(p, gfp_mask, order, mem, nodemask);
 
        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
@@ -482,7 +485,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
  */
 static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
-                               int order)
+                               int order, const nodemask_t *nodemask)
 {
        if (likely(!sysctl_panic_on_oom))
                return;
@@ -496,7 +499,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
                        return;
        }
        read_lock(&tasklist_lock);
-       dump_header(NULL, gfp_mask, order, NULL);
+       dump_header(NULL, gfp_mask, order, NULL, nodemask);
        read_unlock(&tasklist_lock);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
@@ -509,7 +512,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
        unsigned int points = 0;
        struct task_struct *p;
 
-       check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
+       check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
        limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
        read_lock(&tasklist_lock);
 retry:
@@ -641,6 +644,7 @@ static void clear_system_oom(void)
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                int order, nodemask_t *nodemask)
 {
+       const nodemask_t *mpol_mask;
        struct task_struct *p;
        unsigned long totalpages;
        unsigned long freed = 0;
@@ -670,7 +674,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         */
        constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
                                                &totalpages);
-       check_panic_on_oom(constraint, gfp_mask, order);
+       mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
+       check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
 
        read_lock(&tasklist_lock);
        if (sysctl_oom_kill_allocating_task &&
@@ -688,15 +693,13 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
        }
 
 retry:
-       p = select_bad_process(&points, totalpages, NULL,
-                       constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
-                                                                NULL);
+       p = select_bad_process(&points, totalpages, NULL, mpol_mask);
        if (PTR_ERR(p) == -1UL)
                goto out;
 
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p) {
-               dump_header(NULL, gfp_mask, order, NULL);
+               dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
                read_unlock(&tasklist_lock);
                panic("Out of memory and no killable processes...\n");
        }
index 58c572b18b07ffbca4e2120d4e1600705db5fc0e..c76ef3891e0da1c71ac3d1b2d4b1db0abdfe8b9f 100644 (file)
@@ -1401,9 +1401,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
                        if (pcpu_first_unit_cpu == NR_CPUS)
                                pcpu_first_unit_cpu = cpu;
+                       pcpu_last_unit_cpu = cpu;
                }
        }
-       pcpu_last_unit_cpu = cpu;
        pcpu_nr_units = unit;
 
        for_each_possible_cpu(cpu)
index f6f0d2dda2eae8480860cf57f5a9cfce69820716..92e6757f196ed4e3b3598c1f8b7214616a4cbe39 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -381,7 +381,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
        if (PageAnon(page)) {
-               if (vma->anon_vma->root != page_anon_vma(page)->root)
+               struct anon_vma *page__anon_vma = page_anon_vma(page);
+               /*
+                * Note: swapoff's unuse_vma() is more efficient with this
+                * check, and needs it to match anon_vma when KSM is active.
+                */
+               if (!vma->anon_vma || !page__anon_vma ||
+                   vma->anon_vma->root != page__anon_vma->root)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
@@ -1564,13 +1570,14 @@ static void __hugepage_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
+
        BUG_ON(!anon_vma);
-       if (!exclusive) {
-               struct anon_vma_chain *avc;
-               avc = list_entry(vma->anon_vma_chain.prev,
-                                struct anon_vma_chain, same_vma);
-               anon_vma = avc->anon_vma;
-       }
+
+       if (PageAnon(page))
+               return;
+       if (!exclusive)
+               anon_vma = anon_vma->root;
+
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;
        page->index = linear_page_index(vma, address);
@@ -1581,6 +1588,8 @@ void hugepage_add_anon_rmap(struct page *page,
 {
        struct anon_vma *anon_vma = vma->anon_vma;
        int first;
+
+       BUG_ON(!PageLocked(page));
        BUG_ON(!anon_vma);
        BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        first = atomic_inc_and_test(&page->_mapcount);
index c391c320dbafcda04260923f36336891283b614f..c5dfabf25f115a34df8f9111843af28a8d58d906 100644 (file)
@@ -1804,12 +1804,11 @@ static void shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static bool shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
-       bool all_unreclaimable = true;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1827,8 +1826,38 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                }
 
                shrink_zone(priority, zone, sc);
-               all_unreclaimable = false;
        }
+}
+
+static bool zone_reclaimable(struct zone *zone)
+{
+       return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
+/*
+ * While hibernation is in progress, kswapd is frozen and so cannot mark
+ * a zone all_unreclaimable, which means it cannot handle OOM during
+ * hibernation.  So check a zone's unreclaimable state in direct reclaim
+ * as well as in kswapd.
+ */
+static bool all_unreclaimable(struct zonelist *zonelist,
+               struct scan_control *sc)
+{
+       struct zoneref *z;
+       struct zone *zone;
+       bool all_unreclaimable = true;
+
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                       gfp_zone(sc->gfp_mask), sc->nodemask) {
+               if (!populated_zone(zone))
+                       continue;
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                       continue;
+               if (zone_reclaimable(zone)) {
+                       all_unreclaimable = false;
+                       break;
+               }
+       }
+
        return all_unreclaimable;
 }
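A worked example of the threshold (illustrative numbers only): with zone_reclaimable_pages(zone) == 1000, the zone counts as reclaimable until pages_scanned reaches 6000; beyond that, direct reclaim now reaches the same "give up" conclusion that kswapd normally records in zone->all_unreclaimable, which kswapd cannot do while it is frozen for hibernation.

    /* zone_reclaimable(): pages_scanned < 6 * zone_reclaimable_pages(zone)
     * e.g. 5999 < 6000 -> still reclaimable; 6000 < 6000 is false -> give up */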
 
@@ -1852,7 +1881,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        int priority;
-       bool all_unreclaimable;
        unsigned long total_scanned = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct zoneref *z;
@@ -1869,7 +1897,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               all_unreclaimable = shrink_zones(priority, zonelist, sc);
+               shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
@@ -1931,7 +1959,7 @@ out:
                return sc->nr_reclaimed;
 
        /* top priority shrink_zones still had more to do? don't OOM, then */
-       if (scanning_global_lru(sc) && !all_unreclaimable)
+       if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                return 1;
 
        return 0;
@@ -2197,8 +2225,7 @@ loop_again:
                        total_scanned += sc.nr_scanned;
                        if (zone->all_unreclaimable)
                                continue;
-                       if (nr_slab == 0 &&
-                           zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
+                       if (nr_slab == 0 && !zone_reclaimable(zone))
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
index 01ddb0472f86c511f49ff8a602a726dd60550ff0..0eb96f7e44befb0155e364749f38eb37af0c2354 100644 (file)
@@ -24,8 +24,11 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 
        if (vlan_dev)
                skb->dev = vlan_dev;
-       else if (vlan_id)
-               goto drop;
+       else if (vlan_id) {
+               if (!(skb->dev->flags & IFF_PROMISC))
+                       goto drop;
+               skb->pkt_type = PACKET_OTHERHOST;
+       }
 
        return (polling ? netif_receive_skb(skb) : netif_rx(skb));
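Behaviour change in both receive paths below, summarized (wording mine):

    /*
     * A tagged frame whose VLAN id has no configured vlan device used to be
     * dropped unconditionally.  Now, if the underlying device is in
     * promiscuous mode, the frame is kept but marked PACKET_OTHERHOST, so
     * the local IP stack still ignores it while packet taps (e.g. tcpdump
     * on the real device) can observe it.
     */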
 
@@ -102,8 +105,11 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 
        if (vlan_dev)
                skb->dev = vlan_dev;
-       else if (vlan_id)
-               goto drop;
+       else if (vlan_id) {
+               if (!(skb->dev->flags & IFF_PROMISC))
+                       goto drop;
+               skb->pkt_type = PACKET_OTHERHOST;
+       }
 
        for (p = napi->gro_list; p; p = p->next) {
                NAPI_GRO_CB(p)->same_flow =
index 0ea20c30466c7b5758e419fd51c3cba70c9797d7..17c5ba7551a55e79c2c38e22a8fe2fdaa8e00979 100644 (file)
@@ -426,8 +426,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 
        /* Allocate an fcall for the reply */
        rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
-       if (!rpl_context)
+       if (!rpl_context) {
+               err = -ENOMEM;
                goto err_close;
+       }
 
        /*
         * If the request has a buffer, steal it, otherwise
@@ -445,8 +447,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
        }
        rpl_context->rc = req->rc;
        if (!rpl_context->rc) {
-               kfree(rpl_context);
-               goto err_close;
+               err = -ENOMEM;
+               goto err_free2;
        }
 
        /*
@@ -458,11 +460,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
         */
        if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
                err = post_recv(client, rpl_context);
-               if (err) {
-                       kfree(rpl_context->rc);
-                       kfree(rpl_context);
-                       goto err_close;
-               }
+               if (err)
+                       goto err_free1;
        } else
                atomic_dec(&rdma->rq_count);
 
@@ -471,8 +470,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 
        /* Post the request */
        c = kmalloc(sizeof *c, GFP_KERNEL);
-       if (!c)
-               goto err_close;
+       if (!c) {
+               err = -ENOMEM;
+               goto err_free1;
+       }
        c->req = req;
 
        c->busa = ib_dma_map_single(rdma->cm_id->device,
@@ -499,9 +500,15 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
        return ib_post_send(rdma->qp, &wr, &bad_wr);
 
  error:
+       kfree(c);
+       kfree(rpl_context->rc);
+       kfree(rpl_context);
        P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
        return -EIO;
-
+ err_free1:
+       kfree(rpl_context->rc);
+ err_free2:
+       kfree(rpl_context);
  err_close:
        spin_lock_irqsave(&rdma->req_lock, flags);
        if (rdma->state < P9_RDMA_CLOSING) {
index dcfbe99ff81c8c0ac56031e1cbbd56bb7ed2fcd4..b88515936e4b3310741e6beefac62376bfe9d832 100644 (file)
@@ -329,7 +329,8 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 
        mutex_lock(&virtio_9p_lock);
        list_for_each_entry(chan, &virtio_chan_list, chan_list) {
-               if (!strncmp(devname, chan->tag, chan->tag_len)) {
+               if (!strncmp(devname, chan->tag, chan->tag_len) &&
+                   strlen(devname) == chan->tag_len) {
                        if (!chan->inuse) {
                                chan->inuse = true;
                                found = 1;
index 651babdfab3845ebd11eb6cc89fe85eae1d3f8ca..ad2b232a2055fbc241828832078087c62e4c4315 100644 (file)
@@ -399,12 +399,6 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
                        unregister_netdev(net_dev);
                        free_netdev(net_dev);
                }
-               read_lock_irq(&devs_lock);
-               if (list_empty(&br2684_devs)) {
-                       /* last br2684 device */
-                       unregister_atmdevice_notifier(&atm_dev_notifier);
-               }
-               read_unlock_irq(&devs_lock);
                return;
        }
 
@@ -675,7 +669,6 @@ static int br2684_create(void __user *arg)
 
        if (list_empty(&br2684_devs)) {
                /* 1st br2684 device */
-               register_atmdevice_notifier(&atm_dev_notifier);
                brdev->number = 1;
        } else
                brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
@@ -815,6 +808,7 @@ static int __init br2684_init(void)
                return -ENOMEM;
 #endif
        register_atm_ioctl(&br2684_ioctl_ops);
+       register_atmdevice_notifier(&atm_dev_notifier);
        return 0;
 }
 
@@ -830,9 +824,7 @@ static void __exit br2684_exit(void)
 #endif
 
 
-       /* if not already empty */
-       if (!list_empty(&br2684_devs))
-               unregister_atmdevice_notifier(&atm_dev_notifier);
+       unregister_atmdevice_notifier(&atm_dev_notifier);
 
        while (!list_empty(&br2684_devs)) {
                net_dev = list_entry_brdev(br2684_devs.next);
index 251997a9548362c5ebc8a0a34842971a0198539a..282806ba7a57e60991f2f7806bc3015d9b8596a5 100644 (file)
@@ -243,6 +243,7 @@ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
        unlock_sock_fast(sk, slow);
 
        /* skb is now orphaned, can be freed outside of locked section */
+       trace_kfree_skb(skb, skb_free_datagram_locked);
        __kfree_skb(skb);
 }
 EXPORT_SYMBOL(skb_free_datagram_locked);
index 660dd41aaaa6629c0d59547e04d23353ba935af8..7ec85e27beed840b8da5f9dae620bcd48453f3f7 100644 (file)
 #include <linux/jhash.h>
 #include <linux/random.h>
 #include <trace/events/napi.h>
+#include <trace/events/net.h>
+#include <trace/events/skb.h>
 #include <linux/pci.h>
 
 #include "net-sysfs.h"
@@ -1978,6 +1980,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                }
 
                rc = ops->ndo_start_xmit(skb, dev);
+               trace_net_dev_xmit(skb, rc);
                if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                return rc;
@@ -1998,6 +2001,7 @@ gso:
                        skb_dst_drop(nskb);
 
                rc = ops->ndo_start_xmit(nskb, dev);
+               trace_net_dev_xmit(nskb, rc);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        if (rc & ~NETDEV_TX_MASK)
                                goto out_kfree_gso_skb;
@@ -2186,6 +2190,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 #ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
 #endif
+       trace_net_dev_queue(skb);
        if (q->enqueue) {
                rc = __dev_xmit_skb(skb, q, dev, txq);
                goto out;
@@ -2512,6 +2517,7 @@ int netif_rx(struct sk_buff *skb)
        if (netdev_tstamp_prequeue)
                net_timestamp_check(skb);
 
+       trace_netif_rx(skb);
 #ifdef CONFIG_RPS
        {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
@@ -2571,6 +2577,7 @@ static void net_tx_action(struct softirq_action *h)
                        clist = clist->next;
 
                        WARN_ON(atomic_read(&skb->users));
+                       trace_kfree_skb(skb, net_tx_action);
                        __kfree_skb(skb);
                }
        }
@@ -2828,6 +2835,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
        if (!netdev_tstamp_prequeue)
                net_timestamp_check(skb);
 
+       trace_netif_receive_skb(skb);
        if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
                return NET_RX_SUCCESS;
 
index 1cd98df412dfd52daee9cc9105ceddfea0d87a59..e6b133b77ccb5615d65bcdac0ecc01b759808c76 100644 (file)
  *     in any case.
  */
 
-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
 {
-       int size, err, ct;
+       int size, ct;
+       long err;
 
        if (m->msg_namelen) {
                if (mode == VERIFY_READ) {
index afa6380ed88ac2ee0b5cd8c8a731dcb9f3dcb809..7f1bb2aba03bf0e501ee1625ba6f07163b528ab9 100644 (file)
@@ -26,6 +26,7 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/skb.h>
+#include <trace/events/net.h>
 #include <trace/events/napi.h>
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
index c83b421341c01b4a1de702e42d3380fac2f0aa2b..56ba3c4e4761c6584375a8acf45022bc5c40d6e8 100644 (file)
@@ -466,6 +466,7 @@ void consume_skb(struct sk_buff *skb)
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
+       trace_consume_skb(skb);
        __kfree_skb(skb);
 }
 EXPORT_SYMBOL(consume_skb);
index b05b9b6ddb8700989e63e8597f6946ffd205bdba..ef30e9d286e703cccaffbd5ee10394cc167fe057 100644 (file)
@@ -1351,9 +1351,9 @@ int sock_i_uid(struct sock *sk)
 {
        int uid;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        return uid;
 }
 EXPORT_SYMBOL(sock_i_uid);
@@ -1362,9 +1362,9 @@ unsigned long sock_i_ino(struct sock *sk)
 {
        unsigned long ino;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        return ino;
 }
 EXPORT_SYMBOL(sock_i_ino);
index 571f8950ed06f585f4dca482037d4b7985c256af..72380a30d1c89ba1f36f1b2034972f302e886c37 100644 (file)
@@ -217,6 +217,7 @@ config NET_IPIP
 
 config NET_IPGRE
        tristate "IP: GRE tunnels over IP"
+       depends on IPV6 || IPV6=n
        help
          Tunneling means encapsulating data of one protocol type within
          another protocol and sending it over a channel that understands the
index 945b20a5ad5006b6a8ebf84b22756f7573436c1f..35c93e8b6a4694561c838641e546b609e8cbfaa0 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -699,7 +699,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        if ((dst = rt->rt_gateway) == 0)
                                goto tx_error_icmp;
                }
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
                        struct in6_addr *addr6;
                        int addr_type;
@@ -774,7 +774,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        goto tx_error;
                }
        }
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 
@@ -850,7 +850,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        if ((iph->ttl = tiph->ttl) == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph->ttl = old_iph->ttl;
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
 #endif
index 04b69896df5fc743021efd4d4a1705b7de146333..7649d7750075d184896a9da6d37ed9a35ea5f403 100644 (file)
@@ -488,9 +488,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
         * we can switch to copy when see the first bad fragment.
         */
        if (skb_has_frags(skb)) {
-               struct sk_buff *frag;
+               struct sk_buff *frag, *frag2;
                int first_len = skb_pagelen(skb);
-               int truesizes = 0;
 
                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
@@ -503,18 +502,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
-                           goto slow_path;
+                               goto slow_path_clean;
 
                        /* Partially cloned skb? */
                        if (skb_shared(frag))
-                               goto slow_path;
+                               goto slow_path_clean;
 
                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
-                       truesizes += frag->truesize;
+                       skb->truesize -= frag->truesize;
                }
 
                /* Everything is OK. Generate! */
@@ -524,7 +523,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
-               skb->truesize -= truesizes;
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
@@ -576,6 +574,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;
+
+slow_path_clean:
+               skb_walk_frags(skb, frag2) {
+                       if (frag2 == frag)
+                               break;
+                       frag2->sk = NULL;
+                       frag2->destructor = NULL;
+                       skb->truesize += frag2->truesize;
+               }
        }
 
 slow_path:
index b254dafaf4294548b7d36d9b54ce29832f8658e3..43eec80c0e7c55a2f6dc20790a30603236b7d93f 100644 (file)
@@ -112,6 +112,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        /* ip_route_me_harder expects skb->dst to be set */
        skb_dst_set_noref(nskb, skb_dst(oldskb));
 
+       nskb->protocol = htons(ETH_P_IP);
        if (ip_route_me_harder(nskb, addr_type))
                goto free_nskb;
 
index eab8de32f200af74828bbf0fb3187ff578451e9b..f3a9b42b16c620a7f569e00039beea13bc75bc3c 100644 (file)
@@ -66,9 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
                                          const struct net_device *out,
                                          int (*okfn)(struct sk_buff *))
 {
+       struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(skb->sk);
 
-       if (inet && inet->nodefrag)
+       if (sk && (sk->sk_family == PF_INET) &&
+           inet->nodefrag)
                return NF_ACCEPT;
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
index 1679e2c0963d9b83e34f1f8d6a34b7702a603e2c..ee5f419d0a56d01c6533d6ee45f70a7405480150 100644 (file)
@@ -893,13 +893,15 @@ static void fast_csum(__sum16 *csum,
        unsigned char s[4];
 
        if (offset & 1) {
-               s[0] = s[2] = 0;
+               s[0] = ~0;
                s[1] = ~*optr;
+               s[2] = 0;
                s[3] = *nptr;
        } else {
-               s[1] = s[3] = 0;
                s[0] = ~*optr;
+               s[1] = ~0;
                s[2] = *nptr;
+               s[3] = 0;
        }
 
        *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
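The fast_csum() change keeps the one's-complement arithmetic consistent: when only one byte of a 16-bit word changes, the whole old word has to be complemented, so the untouched half of that word must become 0xff (~0) rather than 0 in the subtraction term, while it stays 0 in the addition term. A hedged, standalone sketch of the underlying incremental update (RFC 1624, HC' = ~(~HC + ~m + m')) that the csum_partial() call performs here:

#include <stdint.h>

/*
 * Update a 16-bit one's-complement checksum hc when one checksummed
 * word changes from m_old to m_new:  HC' = ~(~HC + ~m + m').
 */
static uint16_t csum16_update(uint16_t hc, uint16_t m_old, uint16_t m_new)
{
        uint32_t sum = (uint16_t)~hc;

        sum += (uint16_t)~m_old;                /* the whole old word, complemented */
        sum += m_new;
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}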
index 6298f75d5e93c4ad4e838a1982143207e7d3f855..ac6559cb54f9f650986e4dd86996349e328cd92a 100644 (file)
@@ -1231,7 +1231,7 @@ restart:
                        }
 
                        if (net_ratelimit())
-                               printk(KERN_WARNING "Neighbour table overflow.\n");
+                               printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
                        rt_drop(rt);
                        return -ENOBUFS;
                }
index 3fb1428e526eedb521057a49624fa28dde8b41cd..f115ea68a4efa264c59b20f61222db97a6050a9d 100644 (file)
@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
         */
 
        mask = 0;
-       if (sk->sk_err)
-               mask = POLLERR;
 
        /*
         * POLLHUP is certainly not done right. But poll() doesn't
@@ -457,6 +455,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        }
+       /* This barrier is coupled with smp_wmb() in tcp_reset() */
+       smp_rmb();
+       if (sk->sk_err)
+               mask |= POLLERR;
+
        return mask;
 }
 EXPORT_SYMBOL(tcp_poll);
@@ -940,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        sg = sk->sk_route_caps & NETIF_F_SG;
 
        while (--iovlen >= 0) {
-               int seglen = iov->iov_len;
+               size_t seglen = iov->iov_len;
                unsigned char __user *from = iov->iov_base;
 
                iov++;
index e663b78a2ef6b6286b549aa65b418fe385f32184..b55f60f6fcbe934c1364ee3aece309dff4d1be4b 100644 (file)
@@ -2545,7 +2545,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
                        cnt += tcp_skb_pcount(skb);
 
                if (cnt > packets) {
-                       if (tcp_is_sack(tp) || (oldcnt >= packets))
+                       if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+                           (oldcnt >= packets))
                                break;
 
                        mss = skb_shinfo(skb)->gso_size;
@@ -4048,6 +4049,8 @@ static void tcp_reset(struct sock *sk)
        default:
                sk->sk_err = ECONNRESET;
        }
+       /* This barrier is coupled with smp_rmb() in tcp_poll() */
+       smp_wmb();
 
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_error_report(sk);
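Taken together with the tcp_poll() hunk above, this forms a classic publish/observe pairing: tcp_reset() writes sk->sk_err and then issues smp_wmb(); tcp_poll() builds the mask, issues smp_rmb(), and only then reads sk->sk_err, so a poller that observes the post-reset socket state also observes the error. A userspace analogue using C11 fences, with illustrative names rather than the real socket fields:

#include <errno.h>
#include <poll.h>
#include <stdatomic.h>

static int sk_err;                      /* plain field, cf. sk->sk_err */
static atomic_int reset_published;      /* stands in for the poll-visible state */

static void writer_reset(void)          /* cf. tcp_reset() */
{
        sk_err = ECONNRESET;
        atomic_thread_fence(memory_order_release);      /* cf. smp_wmb() */
        atomic_store_explicit(&reset_published, 1, memory_order_relaxed);
}

static int reader_poll(void)            /* cf. tcp_poll() */
{
        int mask = 0;

        if (atomic_load_explicit(&reset_published, memory_order_relaxed)) {
                atomic_thread_fence(memory_order_acquire); /* cf. smp_rmb() */
                if (sk_err)
                        mask |= POLLERR;
        }
        return mask;
}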
index c35b469e851c298814d69583bd593ec1c580dde5..74c54b30600f618522e07581c43130eea031f2db 100644 (file)
@@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 
 /* This function calculates a "timeout" which is equivalent to the timeout of a
  * TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
  */
 static bool retransmits_timed_out(struct sock *sk,
-                                 unsigned int boundary)
+                                 unsigned int boundary,
+                                 bool syn_set)
 {
        unsigned int timeout, linear_backoff_thresh;
        unsigned int start_ts;
+       unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
 
        if (!inet_csk(sk)->icsk_retransmits)
                return false;
@@ -151,12 +154,12 @@ static bool retransmits_timed_out(struct sock *sk,
        else
                start_ts = tcp_sk(sk)->retrans_stamp;
 
-       linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+       linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
 
        if (boundary <= linear_backoff_thresh)
-               timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+               timeout = ((2 << boundary) - 1) * rto_base;
        else
-               timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+               timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
                          (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 
        return (tcp_time_stamp - start_ts) >= timeout;
@@ -167,14 +170,15 @@ static int tcp_write_timeout(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        int retry_until;
-       bool do_reset;
+       bool do_reset, syn_set = 0;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits)
                        dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+               syn_set = 1;
        } else {
-               if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
+               if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
@@ -187,14 +191,14 @@ static int tcp_write_timeout(struct sock *sk)
 
                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
-                                  !retransmits_timed_out(sk, retry_until);
+                                  !retransmits_timed_out(sk, retry_until, 0);
 
                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
        }
 
-       if (retransmits_timed_out(sk, retry_until)) {
+       if (retransmits_timed_out(sk, retry_until, syn_set)) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
@@ -436,7 +440,7 @@ out_reset_timer:
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-       if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
+       if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
                __sk_dst_reset(sk);
 
 out:;
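With this change retransmits_timed_out() scales its backoff from rto_base, which is TCP_TIMEOUT_INIT for connections still in the SYN states and TCP_RTO_MIN otherwise, so SYN retransmissions are judged against the timeouts they actually used. A standalone sketch of the formula, with millisecond constants assumed purely for illustration (the kernel works in jiffies):

#include <stdio.h>

#define RTO_MIN      200U               /* ms, illustrative stand-in for TCP_RTO_MIN */
#define TIMEOUT_INIT 3000U              /* ms, illustrative stand-in for TCP_TIMEOUT_INIT */
#define RTO_MAX      120000U            /* ms, illustrative stand-in for TCP_RTO_MAX */

static unsigned int ilog2_u(unsigned int x)
{
        unsigned int r = 0;

        while (x >>= 1)
                r++;
        return r;
}

static unsigned int cumulative_timeout(unsigned int boundary, int syn_set)
{
        unsigned int rto_base = syn_set ? TIMEOUT_INIT : RTO_MIN;
        unsigned int thresh = ilog2_u(RTO_MAX / rto_base);

        if (boundary <= thresh)
                return ((2u << boundary) - 1) * rto_base;
        return ((2u << thresh) - 1) * rto_base +
               (boundary - thresh) * RTO_MAX;
}

int main(void)
{
        /* e.g. 5 data retransmissions: (2^6 - 1) * 200 ms = 12600 ms */
        printf("%u ms\n", cumulative_timeout(5, 0));
        return 0;
}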
index 869078d4eeb957a4982ab62e5d408e8fbf42117c..a580349f0b8ab53c77b04b00d504de0bf1f83e09 100644 (file)
@@ -61,7 +61,7 @@ static int xfrm4_get_saddr(struct net *net,
 
 static int xfrm4_get_tos(struct flowi *fl)
 {
-       return fl->fl4_tos;
+       return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
 }
 
 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
index 1ef1366a0a03775eed25ab5ef0b083aed593726e..47947624eccc58fe7fcfcdbff19fb655a07ddedc 100644 (file)
@@ -21,21 +21,25 @@ static int xfrm4_init_flags(struct xfrm_state *x)
 }
 
 static void
-__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
-                    struct xfrm_tmpl *tmpl,
-                    xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+{
+       sel->daddr.a4 = fl->fl4_dst;
+       sel->saddr.a4 = fl->fl4_src;
+       sel->dport = xfrm_flowi_dport(fl);
+       sel->dport_mask = htons(0xffff);
+       sel->sport = xfrm_flowi_sport(fl);
+       sel->sport_mask = htons(0xffff);
+       sel->family = AF_INET;
+       sel->prefixlen_d = 32;
+       sel->prefixlen_s = 32;
+       sel->proto = fl->proto;
+       sel->ifindex = fl->oif;
+}
+
+static void
+xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+                  xfrm_address_t *daddr, xfrm_address_t *saddr)
 {
-       x->sel.daddr.a4 = fl->fl4_dst;
-       x->sel.saddr.a4 = fl->fl4_src;
-       x->sel.dport = xfrm_flowi_dport(fl);
-       x->sel.dport_mask = htons(0xffff);
-       x->sel.sport = xfrm_flowi_sport(fl);
-       x->sel.sport_mask = htons(0xffff);
-       x->sel.family = AF_INET;
-       x->sel.prefixlen_d = 32;
-       x->sel.prefixlen_s = 32;
-       x->sel.proto = fl->proto;
-       x->sel.ifindex = fl->oif;
        x->id = tmpl->id;
        if (x->id.daddr.a4 == 0)
                x->id.daddr.a4 = daddr->a4;
@@ -70,6 +74,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
        .owner                  = THIS_MODULE,
        .init_flags             = xfrm4_init_flags,
        .init_tempsel           = __xfrm4_init_tempsel,
+       .init_temprop           = xfrm4_init_temprop,
        .output                 = xfrm4_output,
        .extract_input          = xfrm4_extract_input,
        .extract_output         = xfrm4_extract_output,
index ab70a3fbcafafee9a080ea0f8c6eab2ce04fc37e..324fac3b6c16db0238d1139649038bcaa9aaced2 100644 (file)
@@ -4637,10 +4637,12 @@ int __init addrconf_init(void)
        if (err < 0) {
                printk(KERN_CRIT "IPv6 Addrconf:"
                       " cannot initialize default policy table: %d.\n", err);
-               return err;
+               goto out;
        }
 
-       register_pernet_subsys(&addrconf_ops);
+       err = register_pernet_subsys(&addrconf_ops);
+       if (err < 0)
+               goto out_addrlabel;
 
        /* The addrconf netdev notifier requires that loopback_dev
         * has its ipv6 private information allocated and setup

@@ -4692,7 +4694,9 @@ errout:
        unregister_netdevice_notifier(&ipv6_dev_notf);
 errlo:
        unregister_pernet_subsys(&addrconf_ops);
-
+out_addrlabel:
+       ipv6_addr_label_cleanup();
+out:
        return err;
 }
 
@@ -4703,6 +4707,7 @@ void addrconf_cleanup(void)
 
        unregister_netdevice_notifier(&ipv6_dev_notf);
        unregister_pernet_subsys(&addrconf_ops);
+       ipv6_addr_label_cleanup();
 
        rtnl_lock();
 
index f0e774cea386696a72560d9306f441a46817a96d..8175f802651bec4481080823a73f60a21d3a2f36 100644 (file)
@@ -393,6 +393,11 @@ int __init ipv6_addr_label_init(void)
        return register_pernet_subsys(&ipv6_addr_label_ops);
 }
 
+void ipv6_addr_label_cleanup(void)
+{
+       unregister_pernet_subsys(&ipv6_addr_label_ops);
+}
+
 static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
        [IFAL_ADDRESS]          = { .len = sizeof(struct in6_addr), },
        [IFAL_LABEL]            = { .len = sizeof(u32), },
index d40b330c0ee698af62f51f90caf86b2e6cf04c9f..980912ed7a388bd2404b8cb934b2b61e67e12a6c 100644 (file)
@@ -639,7 +639,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
        if (skb_has_frags(skb)) {
                int first_len = skb_pagelen(skb);
-               int truesizes = 0;
+               struct sk_buff *frag2;
 
                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
@@ -651,18 +651,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
-                           goto slow_path;
+                               goto slow_path_clean;
 
                        /* Partially cloned skb? */
                        if (skb_shared(frag))
-                               goto slow_path;
+                               goto slow_path_clean;
 
                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
-                               truesizes += frag->truesize;
                        }
+                       skb->truesize -= frag->truesize;
                }
 
                err = 0;
@@ -693,7 +693,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
-               skb->truesize -= truesizes;
                skb->len = first_len;
                ipv6_hdr(skb)->payload_len = htons(first_len -
                                                   sizeof(struct ipv6hdr));
@@ -756,6 +755,15 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                              IPSTATS_MIB_FRAGFAILS);
                dst_release(&rt->dst);
                return err;
+
+slow_path_clean:
+               skb_walk_frags(skb, frag2) {
+                       if (frag2 == frag)
+                               break;
+                       frag2->sk = NULL;
+                       frag2->destructor = NULL;
+                       skb->truesize += frag2->truesize;
+               }
        }
 
 slow_path:
index d126365ac0463bc075d62446562a8d9f07d548a2..8323136bdc54cc24f81acf73acf0d2c256ff8203 100644 (file)
@@ -670,7 +670,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
 
                        if (net_ratelimit())
                                printk(KERN_WARNING
-                                      "Neighbour table overflow.\n");
+                                      "ipv6: Neighbour table overflow.\n");
                        dst_free(&rt->dst);
                        return NULL;
                }
index f417b77fa0e15762715a9498974fbd8f5071401d..a67575d472a320f306002a88959f0d0f9a73af09 100644 (file)
 #include <net/addrconf.h>
 
 static void
-__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
-                    struct xfrm_tmpl *tmpl,
-                    xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
 {
        /* Initialize temporary selector matching only
         * to current session. */
-       ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst);
-       ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src);
-       x->sel.dport = xfrm_flowi_dport(fl);
-       x->sel.dport_mask = htons(0xffff);
-       x->sel.sport = xfrm_flowi_sport(fl);
-       x->sel.sport_mask = htons(0xffff);
-       x->sel.family = AF_INET6;
-       x->sel.prefixlen_d = 128;
-       x->sel.prefixlen_s = 128;
-       x->sel.proto = fl->proto;
-       x->sel.ifindex = fl->oif;
+       ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst);
+       ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src);
+       sel->dport = xfrm_flowi_dport(fl);
+       sel->dport_mask = htons(0xffff);
+       sel->sport = xfrm_flowi_sport(fl);
+       sel->sport_mask = htons(0xffff);
+       sel->family = AF_INET6;
+       sel->prefixlen_d = 128;
+       sel->prefixlen_s = 128;
+       sel->proto = fl->proto;
+       sel->ifindex = fl->oif;
+}
+
+static void
+xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+                  xfrm_address_t *daddr, xfrm_address_t *saddr)
+{
        x->id = tmpl->id;
        if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
                memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
@@ -168,6 +172,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
        .eth_proto              = htons(ETH_P_IPV6),
        .owner                  = THIS_MODULE,
        .init_tempsel           = __xfrm6_init_tempsel,
+       .init_temprop           = xfrm6_init_temprop,
        .tmpl_sort              = __xfrm6_tmpl_sort,
        .state_sort             = __xfrm6_state_sort,
        .output                 = xfrm6_output,
index fa0f37e4afe4901226b0ccd668ae6a88c136eeb2..28624282c5f36ad5bed8f74c0e8bf7df42ee2d6c 100644 (file)
@@ -2199,9 +2199,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
        struct net_device *prev_dev = NULL;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
-       if (status->flag & RX_FLAG_INTERNAL_CMTR)
-               goto out_free_skb;
-
        if (skb_headroom(skb) < sizeof(*rthdr) &&
            pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
                goto out_free_skb;
@@ -2260,7 +2257,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
        } else
                goto out_free_skb;
 
-       status->flag |= RX_FLAG_INTERNAL_CMTR;
        return;
 
  out_free_skb:
index 7dcf7a404190e6aa3fa06e642f54279e2f30fba9..8d9e4c949b9618eafc4a7b301d305a15cab805b6 100644 (file)
@@ -48,15 +48,17 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
 {
        unsigned int off, len;
        struct nf_ct_ext_type *t;
+       size_t alloc_size;
 
        rcu_read_lock();
        t = rcu_dereference(nf_ct_ext_types[id]);
        BUG_ON(t == NULL);
        off = ALIGN(sizeof(struct nf_ct_ext), t->align);
        len = off + t->len;
+       alloc_size = t->alloc_size;
        rcu_read_unlock();
 
-       *ext = kzalloc(t->alloc_size, gfp);
+       *ext = kzalloc(alloc_size, gfp);
        if (!*ext)
                return NULL;
 
index 53d892210a049363fa3ab04f6e17b04fa9969bbe..f64de95448669242cf23ac5d8a38987c2f8cd344 100644 (file)
@@ -1376,7 +1376,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
        unsigned int msglen, origlen;
        const char *dptr, *end;
        s16 diff, tdiff = 0;
-       int ret;
+       int ret = NF_ACCEPT;
        typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
 
        if (ctinfo != IP_CT_ESTABLISHED &&
index 5490fc37c92dfa5363a2992fd67fd4f145f65360..daab8c4a903ca20103c1c5d6ebe997fdb657f581 100644 (file)
@@ -70,7 +70,11 @@ nf_tproxy_destructor(struct sk_buff *skb)
 int
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
 {
-       if (inet_sk(sk)->transparent) {
+       bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
+                               inet_twsk(sk)->tw_transparent :
+                               inet_sk(sk)->transparent;
+
+       if (transparent) {
                skb_orphan(skb);
                skb->sk = sk;
                skb->destructor = nf_tproxy_destructor;
index b2a3ae6cad78e28324e23b857dc0a5773f569786..15003021f4f0a8706e540150425b4f995dc582a6 100644 (file)
@@ -225,12 +225,13 @@ static void pipe_grant_credits(struct sock *sk)
 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 {
        struct pep_sock *pn = pep_sk(sk);
-       struct pnpipehdr *hdr = pnp_hdr(skb);
+       struct pnpipehdr *hdr;
        int wake = 0;
 
        if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
                return -EINVAL;
 
+       hdr = pnp_hdr(skb);
        if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
                LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
                                (unsigned)hdr->data[0]);
index c397524c039cdb28140ff5f6c0fe2359cebfa3b5..c519939e8da98fd3ae8252355a0ebb9efac4acd5 100644 (file)
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) {
                state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
                        break;
        }
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        state_change(sk);
 }
 
index 975183fe6950a34b242ef55db011e6f04c1c6772..27844f231d103a4e49e542d670eaca66d01e761d 100644 (file)
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("listen data ready sk %p\n", sk);
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
        if (ready == NULL) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
                queue_work(rds_wq, &rds_tcp_listen_work);
 
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 1aba6878fa5dc42d4c54473350fde61d714a184c..e43797404102efcc2ed45117456e839b5425adb1 100644 (file)
@@ -324,7 +324,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -338,7 +338,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
        if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index a28b895ff0d10194730463b218e3ccb526cdea50..2f012a07d94d16d3aa01d4c78d0894e939dfd751 100644 (file)
@@ -224,7 +224,7 @@ void rds_tcp_write_space(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) {
                write_space = sk->sk_write_space;
@@ -244,7 +244,7 @@ void rds_tcp_write_space(struct sock *sk)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 
        /*
         * write_space is only called when data leaves tcp's send queue if
index 8e45e76a95f51cdca332263ee7e32108f0beb95c..d952e7eac18867501a0bf7d2034c621c2fd08df7 100644 (file)
@@ -679,7 +679,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
                return -EINVAL;
 
-       if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+       if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
                return -EINVAL;
 
        if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -739,7 +739,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
        if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
                return -EINVAL;
 
-       if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+       if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
                return -EINVAL;
 
        /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
index b6309db5622689deaf1c76a6acb1f6a09a02602c..fe9306bf10cc7f3bba4590ddf853c2ac39eadae9 100644 (file)
@@ -800,7 +800,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
        u32 _xid;
        __be32 *xp;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        dprintk("RPC:       xs_udp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
@@ -852,7 +852,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
  dropit:
        skb_free_datagram(sk, skb);
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
@@ -1229,7 +1229,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
 
        dprintk("RPC:       xs_tcp_data_ready...\n");
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        if (xprt->shutdown)
@@ -1248,7 +1248,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
                read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
        } while (read > 0);
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /*
@@ -1301,7 +1301,7 @@ static void xs_tcp_state_change(struct sock *sk)
 {
        struct rpc_xprt *xprt;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
@@ -1313,7 +1313,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
        switch (sk->sk_state) {
        case TCP_ESTABLISHED:
-               spin_lock_bh(&xprt->transport_lock);
+               spin_lock(&xprt->transport_lock);
                if (!xprt_test_and_set_connected(xprt)) {
                        struct sock_xprt *transport = container_of(xprt,
                                        struct sock_xprt, xprt);
@@ -1327,7 +1327,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
                        xprt_wake_pending_tasks(xprt, -EAGAIN);
                }
-               spin_unlock_bh(&xprt->transport_lock);
+               spin_unlock(&xprt->transport_lock);
                break;
        case TCP_FIN_WAIT1:
                /* The client initiated a shutdown of the socket */
@@ -1365,7 +1365,7 @@ static void xs_tcp_state_change(struct sock *sk)
                xs_sock_mark_closed(xprt);
        }
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1376,7 +1376,7 @@ static void xs_error_report(struct sock *sk)
 {
        struct rpc_xprt *xprt;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC:       %s client %p...\n"
@@ -1384,7 +1384,7 @@ static void xs_error_report(struct sock *sk)
                        __func__, xprt, sk->sk_err);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_write_space(struct sock *sk)
@@ -1416,13 +1416,13 @@ static void xs_write_space(struct sock *sk)
  */
 static void xs_udp_write_space(struct sock *sk)
 {
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
 
        /* from net/core/sock.c:sock_def_write_space */
        if (sock_writeable(sk))
                xs_write_space(sk);
 
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1437,13 +1437,13 @@ static void xs_udp_write_space(struct sock *sk)
  */
 static void xs_tcp_write_space(struct sock *sk)
 {
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
 
        /* from net/core/stream.c:sk_stream_write_space */
        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                xs_write_space(sk);
 
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
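All of these socket callbacks now take sk_callback_lock with read_lock_bh(), and because bottom halves are already disabled inside that section, the nested transport_lock in xs_tcp_state_change() can drop from spin_lock_bh() to plain spin_lock(). A minimal sketch of that nesting rule, with illustrative locks:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(outer_lock);       /* cf. sk->sk_callback_lock */
static DEFINE_SPINLOCK(inner_lock);     /* cf. xprt->transport_lock */

static void callback(void)
{
        read_lock_bh(&outer_lock);      /* also disables bottom halves */
        spin_lock(&inner_lock);         /* the _bh variant is no longer needed here */
        /* ... update transport state ... */
        spin_unlock(&inner_lock);
        read_unlock_bh(&outer_lock);
}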
index 3feb28e41c5347b85175f57daf223918620723b2..674d426a9d24f9aab7657d1e8ecf342e3be87438 100644 (file)
@@ -152,7 +152,7 @@ static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd,
        } else if (!iwp->pointer)
                return -EFAULT;
 
-       extra = kmalloc(extra_size, GFP_KERNEL);
+       extra = kzalloc(extra_size, GFP_KERNEL);
        if (!extra)
                return -ENOMEM;
 
index 2b3ed7ad49338f3ec2d64caf1dd589c2aefc4264..cbab6e1a8c9c4043fcfdba5bdc17c5e9d5dea45c 100644 (file)
@@ -1175,9 +1175,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
                    tmpl->mode == XFRM_MODE_BEET) {
                        remote = &tmpl->id.daddr;
                        local = &tmpl->saddr;
-                       family = tmpl->encap_family;
-                       if (xfrm_addr_any(local, family)) {
-                               error = xfrm_get_saddr(net, &tmp, remote, family);
+                       if (xfrm_addr_any(local, tmpl->encap_family)) {
+                               error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
                                if (error)
                                        goto fail;
                                local = &tmp;
index 5208b12fbfb4942d4142f79ddb41ccb1420a4c93..eb96ce52f1789dd881116e76a08169189b50f02c 100644 (file)
@@ -656,15 +656,23 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 EXPORT_SYMBOL(xfrm_sad_getinfo);
 
 static int
-xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
-                 struct xfrm_tmpl *tmpl,
-                 xfrm_address_t *daddr, xfrm_address_t *saddr,
-                 unsigned short family)
+xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
+                   struct xfrm_tmpl *tmpl,
+                   xfrm_address_t *daddr, xfrm_address_t *saddr,
+                   unsigned short family)
 {
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -1;
-       afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
+       afinfo->init_tempsel(&x->sel, fl);
+
+       if (family != tmpl->encap_family) {
+               xfrm_state_put_afinfo(afinfo);
+               afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
+               if (!afinfo)
+                       return -1;
+       }
+       afinfo->init_temprop(x, tmpl, daddr, saddr);
        xfrm_state_put_afinfo(afinfo);
        return 0;
 }
@@ -790,37 +798,38 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
        int error = 0;
        struct xfrm_state *best = NULL;
        u32 mark = pol->mark.v & pol->mark.m;
+       unsigned short encap_family = tmpl->encap_family;
 
        to_put = NULL;
 
        spin_lock_bh(&xfrm_state_lock);
-       h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
+       h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
        hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
-               if (x->props.family == family &&
+               if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
-                   xfrm_state_addr_check(x, daddr, saddr, family) &&
+                   xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-                       xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+                       xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
                                           &best, &acquire_in_progress, &error);
        }
        if (best)
                goto found;
 
-       h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+       h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
        hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
-               if (x->props.family == family &&
+               if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
-                   xfrm_state_addr_check(x, daddr, saddr, family) &&
+                   xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-                       xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+                       xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
                                           &best, &acquire_in_progress, &error);
        }
 
@@ -829,7 +838,7 @@ found:
        if (!x && !error && !acquire_in_progress) {
                if (tmpl->id.spi &&
                    (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
-                                             tmpl->id.proto, family)) != NULL) {
+                                             tmpl->id.proto, encap_family)) != NULL) {
                        to_put = x0;
                        error = -EEXIST;
                        goto out;
@@ -839,9 +848,9 @@ found:
                        error = -ENOMEM;
                        goto out;
                }
-               /* Initialize temporary selector matching only
+               /* Initialize temporary state matching only
                 * to current session. */
-               xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
+               xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
                memcpy(&x->mark, &pol->mark, sizeof(x->mark));
 
                error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
@@ -856,10 +865,10 @@ found:
                        x->km.state = XFRM_STATE_ACQ;
                        list_add(&x->km.all, &net->xfrm.state_all);
                        hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
-                       h = xfrm_src_hash(net, daddr, saddr, family);
+                       h = xfrm_src_hash(net, daddr, saddr, encap_family);
                        hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
                        if (x->id.spi) {
-                               h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family);
+                               h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
                                hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
                        }
                        x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
index ee03a4f0b64f4361af8c850b3123b8d457bec0bf..06473791c08adb7c5b0a7080ea9600d927c09d94 100644 (file)
@@ -24,6 +24,7 @@ static int __init example_init(void)
 {
        int                     i;
        unsigned int            ret;
+       unsigned int            nents;
        struct scatterlist      sg[10];
 
        printk(KERN_INFO "DMA fifo test start\n");
@@ -61,9 +62,9 @@ static int __init example_init(void)
         * byte at the beginning, after the kfifo_skip().
         */
        sg_init_table(sg, ARRAY_SIZE(sg));
-       ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
-       printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-       if (!ret) {
+       nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
+       printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+       if (!nents) {
                /* fifo is full and no sgl was created */
                printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
                return -EIO;
@@ -71,7 +72,7 @@ static int __init example_init(void)
 
        /* receive data */
        printk(KERN_INFO "scatterlist for receive:\n");
-       for (i = 0; i < ARRAY_SIZE(sg); i++) {
+       for (i = 0; i < nents; i++) {
                printk(KERN_INFO
                "sg[%d] -> "
                "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
@@ -91,16 +92,16 @@ static int __init example_init(void)
        kfifo_dma_in_finish(&fifo, ret);
 
        /* Prepare to transmit data, example: 8 bytes */
-       ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
-       printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-       if (!ret) {
+       nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
+       printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+       if (!nents) {
                /* no data was available and no sgl was created */
                printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
                return -EIO;
        }
 
        printk(KERN_INFO "scatterlist for transmit:\n");
-       for (i = 0; i < ARRAY_SIZE(sg); i++) {
+       for (i = 0; i < nents; i++) {
                printk(KERN_INFO
                "sg[%d] -> "
                "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
index 842dbc2d5aeda0af27ce18af9914deea370dc329..2e088109fbd5238f3e4f6d293848606d0ac95a58 100644 (file)
@@ -11,6 +11,7 @@ hostprogs-$(CONFIG_KALLSYMS)     += kallsyms
 hostprogs-$(CONFIG_LOGO)         += pnmtologo
 hostprogs-$(CONFIG_VT)           += conmakehash
 hostprogs-$(CONFIG_IKCONFIG)     += bin2c
+hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount
 
 always         := $(hostprogs-y) $(hostprogs-m)
 
index a1a5cf95a68d73bd5179474e07b274f77eb5efaf..4d03a7efc68930ffc43c44a428a240cda0c0ec35 100644 (file)
@@ -209,12 +209,16 @@ cmd_modversions =                                                         \
 endif
 
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ifdef BUILD_C_RECORDMCOUNT
+cmd_record_mcount = $(srctree)/scripts/recordmcount "$(@)";
+else
 cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
        "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
        "$(if $(CONFIG_64BIT),64,32)" \
        "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
        "$(if $(part-of-module),1,0)" "$(@)";
 endif
+endif
 
 define rule_cc_o_c
        $(call echo-cmd,checksrc) $(cmd_checksrc)                         \
index 54fd1b700131e1e1fcb0ddd13d89ee3a06983ecb..7bfcf1a09ac599be43a125ddb5cabf17d3a37e65 100644 (file)
@@ -101,14 +101,6 @@ basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
 modname_flags  = $(if $(filter 1,$(words $(modname))),\
                  -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
 
-#hash values
-ifdef CONFIG_DYNAMIC_DEBUG
-debug_flags = -D"DEBUG_HASH=$(shell ./scripts/basic/hash djb2 $(@D)$(modname))"\
-              -D"DEBUG_HASH2=$(shell ./scripts/basic/hash r5 $(@D)$(modname))"
-else
-debug_flags =
-endif
-
 orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
                  $(ccflags-y) $(CFLAGS_$(basetarget).o)
 _c_flags       = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
@@ -152,8 +144,7 @@ endif
 
 c_flags        = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
                 $(__c_flags) $(modkern_cflags)                           \
-                -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags) \
-                 $(debug_flags)
+                -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
 
 a_flags        = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
                 $(__a_flags) $(modkern_aflags)
index 09559951df1208e00f5a51efa1208145a87efe14..4c324a1f1e0efb8668b4d64e05b56e8e0f64f25b 100644 (file)
@@ -9,7 +9,7 @@
 # fixdep:       Used to generate dependency information during build process
 # docproc:      Used in Documentation/DocBook
 
-hostprogs-y    := fixdep docproc hash
+hostprogs-y    := fixdep docproc
 always         := $(hostprogs-y)
 
 # fixdep is needed to compile other host programs
diff --git a/scripts/basic/hash.c b/scripts/basic/hash.c
deleted file mode 100644 (file)
index 2ef5d3f..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
- *
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#define DYNAMIC_DEBUG_HASH_BITS 6
-
-static const char *program;
-
-static void usage(void)
-{
-       printf("Usage: %s <djb2|r5> <modname>\n", program);
-       exit(1);
-}
-
-/* djb2 hashing algorithm by Dan Bernstein. From:
- * http://www.cse.yorku.ca/~oz/hash.html
- */
-
-static unsigned int djb2_hash(char *str)
-{
-       unsigned long hash = 5381;
-       int c;
-
-       c = *str;
-       while (c) {
-               hash = ((hash << 5) + hash) + c;
-               c = *++str;
-       }
-       return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
-}
-
-static unsigned int r5_hash(char *str)
-{
-       unsigned long hash = 0;
-       int c;
-
-       c = *str;
-       while (c) {
-               hash = (hash + (c << 4) + (c >> 4)) * 11;
-               c = *++str;
-       }
-       return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
-}
-
-int main(int argc, char *argv[])
-{
-       program = argv[0];
-
-       if (argc != 3)
-               usage();
-       if (!strcmp(argv[1], "djb2"))
-               printf("%d\n", djb2_hash(argv[2]));
-       else if (!strcmp(argv[1], "r5"))
-               printf("%d\n", r5_hash(argv[2]));
-       else
-               usage();
-       exit(0);
-}
-
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
new file mode 100644 (file)
index 0000000..520d16b
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/sh
+# Test for gcc 'asm goto' support
+# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
+
+echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
new file mode 100644 (file)
index 0000000..7f7f718
--- /dev/null
@@ -0,0 +1,345 @@
+/*
+ * recordmcount.c: construct a table of the locations of calls to 'mcount'
+ * so that ftrace can find them quickly.
+ * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>.  All rights reserved.
+ * Licensed under the GNU General Public License, version 2 (GPLv2).
+ *
+ * Restructured to fit Linux format, as well as other updates:
+ *  Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+ */
+
+/*
+ * Strategy: alter the .o file in-place.
+ *
+ * Append a new STRTAB that has the new section names, followed by a new array
+ * ElfXX_Shdr[] that has the new section headers, followed by the section
+ * contents for __mcount_loc and its relocations.  The old shstrtab strings,
+ * and the old ElfXX_Shdr[] array, remain as "garbage" (commonly, a couple
+ * kilobytes.)  Subsequent processing by /bin/ld (or the kernel module loader)
+ * will ignore the garbage regions, because they are not designated by the
+ * new .e_shoff nor the new ElfXX_Shdr[].  [In order to remove the garbage,
+ * then use "ld -r" to create a new file that omits the garbage.]
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <setjmp.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static int fd_map;     /* File descriptor for file being modified. */
+static int mmap_failed; /* Boolean flag. */
+static void *ehdr_curr; /* current ElfXX_Ehdr *  for resource cleanup */
+static char gpfx;      /* prefix for global symbol name (sometimes '_') */
+static struct stat sb; /* Remember .st_size, etc. */
+static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */
+
+/* setjmp() return values */
+enum {
+       SJ_SETJMP = 0,  /* hardwired first return */
+       SJ_FAIL,
+       SJ_SUCCEED
+};
+
+/* Per-file resource cleanup when multiple files. */
+static void
+cleanup(void)
+{
+       if (!mmap_failed)
+               munmap(ehdr_curr, sb.st_size);
+       else
+               free(ehdr_curr);
+       close(fd_map);
+}
+
+static void __attribute__((noreturn))
+fail_file(void)
+{
+       cleanup();
+       longjmp(jmpenv, SJ_FAIL);
+}
+
+static void __attribute__((noreturn))
+succeed_file(void)
+{
+       cleanup();
+       longjmp(jmpenv, SJ_SUCCEED);
+}
+
+/* ulseek, uread, ...:  Check return value for errors. */
+
+static off_t
+ulseek(int const fd, off_t const offset, int const whence)
+{
+       off_t const w = lseek(fd, offset, whence);
+       if ((off_t)-1 == w) {
+               perror("lseek");
+               fail_file();
+       }
+       return w;
+}
+
+static size_t
+uread(int const fd, void *const buf, size_t const count)
+{
+       size_t const n = read(fd, buf, count);
+       if (n != count) {
+               perror("read");
+               fail_file();
+       }
+       return n;
+}
+
+static size_t
+uwrite(int const fd, void const *const buf, size_t const count)
+{
+       size_t const n = write(fd, buf, count);
+       if (n != count) {
+               perror("write");
+               fail_file();
+       }
+       return n;
+}
+
+static void *
+umalloc(size_t size)
+{
+       void *const addr = malloc(size);
+       if (0 == addr) {
+               fprintf(stderr, "malloc failed: %zu bytes\n", size);
+               fail_file();
+       }
+       return addr;
+}
+
+/*
+ * Get the whole file as a programming convenience in order to avoid
+ * malloc+lseek+read+free of many pieces.  If successful, then mmap
+ * avoids copying unused pieces; else just read the whole file.
+ * Open for both read and write; new info will be appended to the file.
+ * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr
+ * do not propagate to the file until an explicit overwrite at the last.
+ * This preserves most aspects of consistency (all except .st_size)
+ * for simultaneous readers of the file while we are appending to it.
+ * However, multiple writers still are bad.  We choose not to use
+ * locking because it is expensive and the use case of kernel build
+ * makes multiple writers unlikely.
+ */
+static void *mmap_file(char const *fname)
+{
+       void *addr;
+
+       fd_map = open(fname, O_RDWR);
+       if (0 > fd_map || 0 > fstat(fd_map, &sb)) {
+               perror(fname);
+               fail_file();
+       }
+       if (!S_ISREG(sb.st_mode)) {
+               fprintf(stderr, "not a regular file: %s\n", fname);
+               fail_file();
+       }
+       addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
+                   fd_map, 0);
+       mmap_failed = 0;
+       if (MAP_FAILED == addr) {
+               mmap_failed = 1;
+               addr = umalloc(sb.st_size);
+               uread(fd_map, addr, sb.st_size);
+       }
+       return addr;
+}
+
+/* w8rev, w8nat, ...: Handle endianness. */
+
+static uint64_t w8rev(uint64_t const x)
+{
+       return   ((0xff & (x >> (0 * 8))) << (7 * 8))
+              | ((0xff & (x >> (1 * 8))) << (6 * 8))
+              | ((0xff & (x >> (2 * 8))) << (5 * 8))
+              | ((0xff & (x >> (3 * 8))) << (4 * 8))
+              | ((0xff & (x >> (4 * 8))) << (3 * 8))
+              | ((0xff & (x >> (5 * 8))) << (2 * 8))
+              | ((0xff & (x >> (6 * 8))) << (1 * 8))
+              | ((0xff & (x >> (7 * 8))) << (0 * 8));
+}
+
+static uint32_t w4rev(uint32_t const x)
+{
+       return   ((0xff & (x >> (0 * 8))) << (3 * 8))
+              | ((0xff & (x >> (1 * 8))) << (2 * 8))
+              | ((0xff & (x >> (2 * 8))) << (1 * 8))
+              | ((0xff & (x >> (3 * 8))) << (0 * 8));
+}
+
+static uint32_t w2rev(uint16_t const x)
+{
+       return   ((0xff & (x >> (0 * 8))) << (1 * 8))
+              | ((0xff & (x >> (1 * 8))) << (0 * 8));
+}
+
+static uint64_t w8nat(uint64_t const x)
+{
+       return x;
+}
+
+static uint32_t w4nat(uint32_t const x)
+{
+       return x;
+}
+
+static uint32_t w2nat(uint16_t const x)
+{
+       return x;
+}
+
+static uint64_t (*w8)(uint64_t);
+static uint32_t (*w)(uint32_t);
+static uint32_t (*w2)(uint16_t);
+
+/* Names of the sections that could contain calls to mcount. */
+static int
+is_mcounted_section_name(char const *const txtname)
+{
+       return 0 == strcmp(".text",          txtname) ||
+               0 == strcmp(".sched.text",    txtname) ||
+               0 == strcmp(".spinlock.text", txtname) ||
+               0 == strcmp(".irqentry.text", txtname) ||
+               0 == strcmp(".text.unlikely", txtname);
+}
+
+/* 32 bit and 64 bit are very similar */
+#include "recordmcount.h"
+#define RECORD_MCOUNT_64
+#include "recordmcount.h"
+
+static void
+do_file(char const *const fname)
+{
+       Elf32_Ehdr *const ehdr = mmap_file(fname);
+       unsigned int reltype = 0;
+
+       ehdr_curr = ehdr;
+       w = w4nat;
+       w2 = w2nat;
+       w8 = w8nat;
+       switch (ehdr->e_ident[EI_DATA]) {
+               static unsigned int const endian = 1;
+       default: {
+               fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
+                       ehdr->e_ident[EI_DATA], fname);
+               fail_file();
+       } break;
+       case ELFDATA2LSB: {
+               if (1 != *(unsigned char const *)&endian) {
+                       /* main() is big endian, file.o is little endian. */
+                       w = w4rev;
+                       w2 = w2rev;
+                       w8 = w8rev;
+               }
+       } break;
+       case ELFDATA2MSB: {
+               if (0 != *(unsigned char const *)&endian) {
+                       /* main() is little endian, file.o is big endian. */
+                       w = w4rev;
+                       w2 = w2rev;
+                       w8 = w8rev;
+               }
+       } break;
+       }  /* end switch */
+       if (0 != memcmp(ELFMAG, ehdr->e_ident, SELFMAG)
+       ||  ET_REL != w2(ehdr->e_type)
+       ||  EV_CURRENT != ehdr->e_ident[EI_VERSION]) {
+               fprintf(stderr, "unrecognized ET_REL file %s\n", fname);
+               fail_file();
+       }
+
+       gpfx = 0;
+       switch (w2(ehdr->e_machine)) {
+       default: {
+               fprintf(stderr, "unrecognized e_machine %d %s\n",
+                       w2(ehdr->e_machine), fname);
+               fail_file();
+       } break;
+       case EM_386:     reltype = R_386_32;                   break;
+       case EM_ARM:     reltype = R_ARM_ABS32;                break;
+       case EM_IA_64:   reltype = R_IA64_IMM64;   gpfx = '_'; break;
+       case EM_PPC:     reltype = R_PPC_ADDR32;   gpfx = '_'; break;
+       case EM_PPC64:   reltype = R_PPC64_ADDR64; gpfx = '_'; break;
+       case EM_S390:    /* reltype: e_class    */ gpfx = '_'; break;
+       case EM_SH:      reltype = R_SH_DIR32;                 break;
+       case EM_SPARCV9: reltype = R_SPARC_64;     gpfx = '_'; break;
+       case EM_X86_64:  reltype = R_X86_64_64;                break;
+       }  /* end switch */
+
+       switch (ehdr->e_ident[EI_CLASS]) {
+       default: {
+               fprintf(stderr, "unrecognized ELF class %d %s\n",
+                       ehdr->e_ident[EI_CLASS], fname);
+               fail_file();
+       } break;
+       case ELFCLASS32: {
+               if (sizeof(Elf32_Ehdr) != w2(ehdr->e_ehsize)
+               ||  sizeof(Elf32_Shdr) != w2(ehdr->e_shentsize)) {
+                       fprintf(stderr,
+                               "unrecognized ET_REL file: %s\n", fname);
+                       fail_file();
+               }
+               if (EM_S390 == w2(ehdr->e_machine))
+                       reltype = R_390_32;
+               do32(ehdr, fname, reltype);
+       } break;
+       case ELFCLASS64: {
+               Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
+               if (sizeof(Elf64_Ehdr) != w2(ghdr->e_ehsize)
+               ||  sizeof(Elf64_Shdr) != w2(ghdr->e_shentsize)) {
+                       fprintf(stderr,
+                               "unrecognized ET_REL file: %s\n", fname);
+                       fail_file();
+               }
+               if (EM_S390 == w2(ghdr->e_machine))
+                       reltype = R_390_64;
+               do64(ghdr, fname, reltype);
+       } break;
+       }  /* end switch */
+
+       cleanup();
+}
+
+int
+main(int argc, char const *argv[])
+{
+       int n_error = 0;  /* gcc-4.3.0 false positive complaint */
+       if (argc <= 1)
+               fprintf(stderr, "usage: recordmcount file.o...\n");
+       else  /* Process each file in turn, allowing deep failure. */
+       for (--argc, ++argv; 0 < argc; --argc, ++argv) {
+               int const sjval = setjmp(jmpenv);
+               switch (sjval) {
+               default: {
+                       fprintf(stderr, "internal error: %s\n", argv[0]);
+                       exit(1);
+               } break;
+               case SJ_SETJMP: {  /* normal sequence */
+                       /* Avoid problems if early cleanup() */
+                       fd_map = -1;
+                       ehdr_curr = NULL;
+                       mmap_failed = 1;
+                       do_file(argv[0]);
+               } break;
+               case SJ_FAIL: {  /* error in do_file or below */
+                       ++n_error;
+               } break;
+               case SJ_SUCCEED: {  /* premature success */
+                       /* do nothing */
+               } break;
+               }  /* end switch */
+       }
+       return !!n_error;
+}
+
+
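recordmcount.c includes recordmcount.h twice, once with RECORD_MCOUNT_64 defined, so a single body of code expands into both the 32-bit (do32, Elf32_*) and 64-bit (do64, Elf64_*) handlers; the header added next starts by #undef-ing and redefining every name it owns for exactly that reason. A tiny, purely illustrative example of the same include-twice technique, not the kernel code:

/* ---- sum_tmpl.h (hypothetical template file) ---- */
#undef sum_func
#undef val_t
#ifdef WIDE
# define sum_func sum64
# define val_t    unsigned long long
#else
# define sum_func sum32
# define val_t    unsigned int
#endif

static val_t sum_func(const val_t *v, int n)
{
        val_t s = 0;

        while (n--)
                s += *v++;
        return s;
}

/* ---- user, cf. recordmcount.c ---- */
/*
 * #include "sum_tmpl.h"        defines sum32()
 * #define WIDE
 * #include "sum_tmpl.h"        defines sum64()
 */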
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
new file mode 100644 (file)
index 0000000..7f39d09
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * recordmcount.h
+ *
+ * This code was taken out of recordmcount.c written by
+ * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>.  All rights reserved.
+ *
+ * The original code had the same algorithms for both 32bit
+ * and 64bit ELF files, but the code was duplicated to support
+ * the difference in structures that were used. This
+ * file creates a macro of everything that is different between
+ * the 64 and 32 bit code, such that by including this header
+ * twice we can create both sets of functions by including this
+ * header once with RECORD_MCOUNT_64 undefined, and again with
+ * it defined.
+ *
+ * This conversion to macros was done by:
+ * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+ *
+ * Licensed under the GNU General Public License, version 2 (GPLv2).
+ */
+#undef append_func
+#undef sift_rel_mcount
+#undef find_secsym_ndx
+#undef __has_rel_mcount
+#undef has_rel_mcount
+#undef tot_relsize
+#undef do_func
+#undef Elf_Ehdr
+#undef Elf_Shdr
+#undef Elf_Rel
+#undef Elf_Rela
+#undef Elf_Sym
+#undef ELF_R_SYM
+#undef ELF_R_INFO
+#undef ELF_ST_BIND
+#undef uint_t
+#undef _w
+#undef _align
+#undef _size
+
+#ifdef RECORD_MCOUNT_64
+# define append_func           append64
+# define sift_rel_mcount       sift64_rel_mcount
+# define find_secsym_ndx       find64_secsym_ndx
+# define __has_rel_mcount      __has64_rel_mcount
+# define has_rel_mcount                has64_rel_mcount
+# define tot_relsize           tot64_relsize
+# define do_func               do64
+# define Elf_Ehdr              Elf64_Ehdr
+# define Elf_Shdr              Elf64_Shdr
+# define Elf_Rel               Elf64_Rel
+# define Elf_Rela              Elf64_Rela
+# define Elf_Sym               Elf64_Sym
+# define ELF_R_SYM             ELF64_R_SYM
+# define ELF_R_INFO            ELF64_R_INFO
+# define ELF_ST_BIND           ELF64_ST_BIND
+# define uint_t                        uint64_t
+# define _w                    w8
+# define _align                        7u
+# define _size                 8
+#else
+# define append_func           append32
+# define sift_rel_mcount       sift32_rel_mcount
+# define find_secsym_ndx       find32_secsym_ndx
+# define __has_rel_mcount      __has32_rel_mcount
+# define has_rel_mcount                has32_rel_mcount
+# define tot_relsize           tot32_relsize
+# define do_func               do32
+# define Elf_Ehdr              Elf32_Ehdr
+# define Elf_Shdr              Elf32_Shdr
+# define Elf_Rel               Elf32_Rel
+# define Elf_Rela              Elf32_Rela
+# define Elf_Sym               Elf32_Sym
+# define ELF_R_SYM             ELF32_R_SYM
+# define ELF_R_INFO            ELF32_R_INFO
+# define ELF_ST_BIND           ELF32_ST_BIND
+# define uint_t                        uint32_t
+# define _w                    w
+# define _align                        3u
+# define _size                 4
+#endif
+
+/* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */
+static void append_func(Elf_Ehdr *const ehdr,
+                       Elf_Shdr *const shstr,
+                       uint_t const *const mloc0,
+                       uint_t const *const mlocp,
+                       Elf_Rel const *const mrel0,
+                       Elf_Rel const *const mrelp,
+                       unsigned int const rel_entsize,
+                       unsigned int const symsec_sh_link)
+{
+       /* Begin constructing output file */
+       Elf_Shdr mcsec;
+       char const *mc_name = (sizeof(Elf_Rela) == rel_entsize)
+               ? ".rela__mcount_loc"
+               :  ".rel__mcount_loc";
+       unsigned const old_shnum = w2(ehdr->e_shnum);
+       uint_t const old_shoff = _w(ehdr->e_shoff);
+       uint_t const old_shstr_sh_size   = _w(shstr->sh_size);
+       uint_t const old_shstr_sh_offset = _w(shstr->sh_offset);
+       uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size);
+       uint_t new_e_shoff;
+
+       shstr->sh_size = _w(t);
+       shstr->sh_offset = _w(sb.st_size);
+       t += sb.st_size;
+       t += (_align & -t);  /* word-byte align */
+       new_e_shoff = t;
+
+       /* body for new shstrtab */
+       ulseek(fd_map, sb.st_size, SEEK_SET);
+       uwrite(fd_map, old_shstr_sh_offset + (void *)ehdr, old_shstr_sh_size);
+       uwrite(fd_map, mc_name, 1 + strlen(mc_name));
+
+       /* old(modified) Elf_Shdr table, word-byte aligned */
+       ulseek(fd_map, t, SEEK_SET);
+       t += sizeof(Elf_Shdr) * old_shnum;
+       uwrite(fd_map, old_shoff + (void *)ehdr,
+              sizeof(Elf_Shdr) * old_shnum);
+
+       /* new sections __mcount_loc and .rel__mcount_loc */
+       t += 2*sizeof(mcsec);
+       mcsec.sh_name = w((sizeof(Elf_Rela) == rel_entsize) + strlen(".rel")
+               + old_shstr_sh_size);
+       mcsec.sh_type = w(SHT_PROGBITS);
+       mcsec.sh_flags = _w(SHF_ALLOC);
+       mcsec.sh_addr = 0;
+       mcsec.sh_offset = _w(t);
+       mcsec.sh_size = _w((void *)mlocp - (void *)mloc0);
+       mcsec.sh_link = 0;
+       mcsec.sh_info = 0;
+       mcsec.sh_addralign = _w(_size);
+       mcsec.sh_entsize = _w(_size);
+       uwrite(fd_map, &mcsec, sizeof(mcsec));
+
+       mcsec.sh_name = w(old_shstr_sh_size);
+       mcsec.sh_type = (sizeof(Elf_Rela) == rel_entsize)
+               ? w(SHT_RELA)
+               : w(SHT_REL);
+       mcsec.sh_flags = 0;
+       mcsec.sh_addr = 0;
+       mcsec.sh_offset = _w((void *)mlocp - (void *)mloc0 + t);
+       mcsec.sh_size   = _w((void *)mrelp - (void *)mrel0);
+       mcsec.sh_link = w(symsec_sh_link);
+       mcsec.sh_info = w(old_shnum);
+       mcsec.sh_addralign = _w(_size);
+       mcsec.sh_entsize = _w(rel_entsize);
+       uwrite(fd_map, &mcsec, sizeof(mcsec));
+
+       uwrite(fd_map, mloc0, (void *)mlocp - (void *)mloc0);
+       uwrite(fd_map, mrel0, (void *)mrelp - (void *)mrel0);
+
+       ehdr->e_shoff = _w(new_e_shoff);
+       ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum));  /* {.rel,}__mcount_loc */
+       ulseek(fd_map, 0, SEEK_SET);
+       uwrite(fd_map, ehdr, sizeof(*ehdr));
+}
+
+
+/*
+ * Look at the relocations in order to find the calls to mcount.
+ * Accumulate the section offsets that are found, and their relocation info,
+ * onto the end of the existing arrays.
+ */
+static uint_t *sift_rel_mcount(uint_t *mlocp,
+                              unsigned const offbase,
+                              Elf_Rel **const mrelpp,
+                              Elf_Shdr const *const relhdr,
+                              Elf_Ehdr const *const ehdr,
+                              unsigned const recsym,
+                              uint_t const recval,
+                              unsigned const reltype)
+{
+       uint_t *const mloc0 = mlocp;
+       Elf_Rel *mrelp = *mrelpp;
+       Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
+               + (void *)ehdr);
+       unsigned const symsec_sh_link = w(relhdr->sh_link);
+       Elf_Shdr const *const symsec = &shdr0[symsec_sh_link];
+       Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symsec->sh_offset)
+               + (void *)ehdr);
+
+       Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)];
+       char const *const str0 = (char const *)(_w(strsec->sh_offset)
+               + (void *)ehdr);
+
+       Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset)
+               + (void *)ehdr);
+       unsigned rel_entsize = _w(relhdr->sh_entsize);
+       unsigned const nrel = _w(relhdr->sh_size) / rel_entsize;
+       Elf_Rel const *relp = rel0;
+
+       unsigned mcountsym = 0;
+       unsigned t;
+
+       for (t = nrel; t; --t) {
+               if (!mcountsym) {
+                       Elf_Sym const *const symp =
+                               &sym0[ELF_R_SYM(_w(relp->r_info))];
+                       char const *symname = &str0[w(symp->st_name)];
+
+                       if ('.' == symname[0])
+                               ++symname;  /* ppc64 hack */
+                       if (0 == strcmp((('_' == gpfx) ? "_mcount" : "mcount"),
+                                       symname))
+                               mcountsym = ELF_R_SYM(_w(relp->r_info));
+               }
+
+               if (mcountsym == ELF_R_SYM(_w(relp->r_info))) {
+                       uint_t const addend = _w(_w(relp->r_offset) - recval);
+
+                       mrelp->r_offset = _w(offbase
+                               + ((void *)mlocp - (void *)mloc0));
+                       mrelp->r_info = _w(ELF_R_INFO(recsym, reltype));
+                       if (sizeof(Elf_Rela) == rel_entsize) {
+                               ((Elf_Rela *)mrelp)->r_addend = addend;
+                               *mlocp++ = 0;
+                       } else
+                               *mlocp++ = addend;
+
+                       mrelp = (Elf_Rel *)(rel_entsize + (void *)mrelp);
+               }
+               relp = (Elf_Rel const *)(rel_entsize + (void *)relp);
+       }
+       *mrelpp = mrelp;
+       return mlocp;
+}
+
+
+/*
+ * Find a symbol in the given section, to be used as the base for relocating
+ * the table of offsets of calls to mcount.  A local or global symbol suffices,
+ * but avoid a Weak symbol because it may be overridden; the change in value
+ * would invalidate the relocations of the offsets of the calls to mcount.
+ * Often the found symbol will be the unnamed local symbol generated by
+ * GNU 'as' for the start of each section.  For example:
+ *    Num:    Value  Size Type    Bind   Vis      Ndx Name
+ *      2: 00000000     0 SECTION LOCAL  DEFAULT    1
+ */
+static unsigned find_secsym_ndx(unsigned const txtndx,
+                               char const *const txtname,
+                               uint_t *const recvalp,
+                               Elf_Shdr const *const symhdr,
+                               Elf_Ehdr const *const ehdr)
+{
+       Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset)
+               + (void *)ehdr);
+       unsigned const nsym = _w(symhdr->sh_size) / _w(symhdr->sh_entsize);
+       Elf_Sym const *symp;
+       unsigned t;
+
+       for (symp = sym0, t = nsym; t; --t, ++symp) {
+               unsigned int const st_bind = ELF_ST_BIND(symp->st_info);
+
+               if (txtndx == w2(symp->st_shndx)
+                       /* avoid STB_WEAK */
+                   && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
+                       *recvalp = _w(symp->st_value);
+                       return symp - sym0;
+               }
+       }
+       fprintf(stderr, "Cannot find symbol for section %d: %s.\n",
+               txtndx, txtname);
+       fail_file();
+}
+
+
+/* Evade ISO C restriction: no declaration after statement in has_rel_mcount. */
+static char const *
+__has_rel_mcount(Elf_Shdr const *const relhdr,  /* is SHT_REL or SHT_RELA */
+                Elf_Shdr const *const shdr0,
+                char const *const shstrtab,
+                char const *const fname)
+{
+       /* .sh_info depends on .sh_type == SHT_REL[,A] */
+       Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)];
+       char const *const txtname = &shstrtab[w(txthdr->sh_name)];
+
+       if (0 == strcmp("__mcount_loc", txtname)) {
+               fprintf(stderr, "warning: __mcount_loc already exists: %s\n",
+                       fname);
+               succeed_file();
+       }
+       if (SHT_PROGBITS != w(txthdr->sh_type) ||
+           !is_mcounted_section_name(txtname))
+               return NULL;
+       return txtname;
+}
+
+static char const *has_rel_mcount(Elf_Shdr const *const relhdr,
+                                 Elf_Shdr const *const shdr0,
+                                 char const *const shstrtab,
+                                 char const *const fname)
+{
+       if (SHT_REL  != w(relhdr->sh_type) && SHT_RELA != w(relhdr->sh_type))
+               return NULL;
+       return __has_rel_mcount(relhdr, shdr0, shstrtab, fname);
+}
+
+
+static unsigned tot_relsize(Elf_Shdr const *const shdr0,
+                           unsigned nhdr,
+                           const char *const shstrtab,
+                           const char *const fname)
+{
+       unsigned totrelsz = 0;
+       Elf_Shdr const *shdrp = shdr0;
+
+       for (; nhdr; --nhdr, ++shdrp) {
+               if (has_rel_mcount(shdrp, shdr0, shstrtab, fname))
+                       totrelsz += _w(shdrp->sh_size);
+       }
+       return totrelsz;
+}
+
+
+/* Overall supervision for an ET_REL file (Elf32 or Elf64, per RECORD_MCOUNT_64). */
+static void
+do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype)
+{
+       Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
+               + (void *)ehdr);
+       unsigned const nhdr = w2(ehdr->e_shnum);
+       Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)];
+       char const *const shstrtab = (char const *)(_w(shstr->sh_offset)
+               + (void *)ehdr);
+
+       Elf_Shdr const *relhdr;
+       unsigned k;
+
+       /* Upper bound on space: assume all relevant relocs are for mcount. */
+       unsigned const totrelsz = tot_relsize(shdr0, nhdr, shstrtab, fname);
+       Elf_Rel *const mrel0 = umalloc(totrelsz);
+       Elf_Rel *      mrelp = mrel0;
+
+       /* 2*sizeof(address) <= sizeof(Elf_Rel) */
+       uint_t *const mloc0 = umalloc(totrelsz>>1);
+       uint_t *      mlocp = mloc0;
+
+       unsigned rel_entsize = 0;
+       unsigned symsec_sh_link = 0;
+
+       for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
+               char const *const txtname = has_rel_mcount(relhdr, shdr0,
+                       shstrtab, fname);
+               if (txtname) {
+                       uint_t recval = 0;
+                       unsigned const recsym = find_secsym_ndx(
+                               w(relhdr->sh_info), txtname, &recval,
+                               &shdr0[symsec_sh_link = w(relhdr->sh_link)],
+                               ehdr);
+
+                       rel_entsize = _w(relhdr->sh_entsize);
+                       mlocp = sift_rel_mcount(mlocp,
+                               (void *)mlocp - (void *)mloc0, &mrelp,
+                               relhdr, ehdr, recsym, recval, reltype);
+               }
+       }
+       if (mloc0 != mlocp) {
+               append_func(ehdr, shstr, mloc0, mlocp, mrel0, mrelp,
+                           rel_entsize, symsec_sh_link);
+       }
+       free(mrel0);
+       free(mloc0);
+}
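
A note on the layout arithmetic in append_func() above: with _align defined as _size - 1, the expression t += (_align & -t) rounds t up to the next multiple of _size (and leaves it unchanged if already aligned), which is what keeps the copied Elf_Shdr table and the new sections word-aligned. A minimal Python sketch of the same idiom (illustration only, not part of the patch):

    def align_up(t, size):
        align = size - 1           # e.g. 7 for 64-bit objects, 3 for 32-bit
        return t + (align & -t)    # -t supplies exactly the bits needed to reach the boundary

    for t in (24, 25, 31, 32):
        print(t, "->", align_up(t, 8))   # 24 -> 24, 25 -> 32, 31 -> 32, 32 -> 32
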
index ef43995119a453401dd768adfa5ae41a2602dd3a..c668b447c72594494417f6be50795f3b49f860bf 100644 (file)
@@ -1416,15 +1416,19 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
        const pid_t gpid = task_pid_nr(current);
        static const int tomoyo_buffer_len = 4096;
        char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
+       pid_t ppid;
        if (!buffer)
                return NULL;
        do_gettimeofday(&tv);
+       rcu_read_lock();
+       ppid = task_tgid_vnr(current->real_parent);
+       rcu_read_unlock();
        snprintf(buffer, tomoyo_buffer_len - 1,
                 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)"
                 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u"
                 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
                 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid,
-                (pid_t) sys_getpid(), (pid_t) sys_getppid(),
+                task_tgid_vnr(current), ppid,
                 current_uid(), current_gid(), current_euid(),
                 current_egid(), current_suid(), current_sgid(),
                 current_fsuid(), current_fsgid());
index 04454cb7b24a534e84c8873a8babeb9614a8da60..7c66bd898782ce0c6fa8ea037cd19dfd78fc8bfe 100644 (file)
@@ -689,9 +689,6 @@ struct tomoyo_profile {
 
 /********** Function prototypes. **********/
 
-extern asmlinkage long sys_getpid(void);
-extern asmlinkage long sys_getppid(void);
-
 /* Check whether the given string starts with the given keyword. */
 bool tomoyo_str_starts(char **src, const char *find);
 /* Get tomoyo_realpath() of current process. */
index 070aab4901914870a0af43faef27e82e761acc32..45a818002d990f664cffadd066488a21c76fedd7 100644 (file)
@@ -31,6 +31,7 @@
 
 /* max number of user-defined controls */
 #define MAX_USER_CONTROLS      32
+#define MAX_CONTROL_COUNT      1028
 
 struct snd_kctl_ioctl {
        struct list_head list;          /* list of all ioctls */
@@ -195,6 +196,10 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
        
        if (snd_BUG_ON(!control || !control->count))
                return NULL;
+
+       if (control->count > MAX_CONTROL_COUNT)
+               return NULL;
+
        kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
        if (kctl == NULL) {
                snd_printk(KERN_ERR "Cannot allocate control instance\n");
index 1adb8a3c2b62db229f9ba0b580d930d288e74c14..42d7844ecd0bfa66ddf3db96527ccc18c79f7485 100644 (file)
@@ -900,7 +900,7 @@ static int proc_init(struct snd_akm4xxx *ak)
        return 0;
 }
 #else /* !CONFIG_PROC_FS */
-static int proc_init(struct snd_akm4xxx *ak) {}
+static int proc_init(struct snd_akm4xxx *ak) { return 0; }
 #endif
 
 int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
index b697fd2a6f8b8cd19a84840fd8e05369226981db..10bbbaf6ebc3d2b0531ba2fb1ab7e3f2be1f8994 100644 (file)
@@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = {
        /* Lenovo Thinkpad T61/X61 */
        SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
        SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
+       SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
        {}
 };
 
index a1312a6c8af2c91453be56f1b554de11307ce4d0..a432e6efd19bbe7bbce3ad9b6607c44661f912cf 100644 (file)
@@ -1594,12 +1594,22 @@ static void alc_auto_parse_digital(struct hda_codec *codec)
        }
 
        if (spec->autocfg.dig_in_pin) {
-               hda_nid_t dig_nid;
-               err = snd_hda_get_connections(codec,
-                                             spec->autocfg.dig_in_pin,
-                                             &dig_nid, 1);
-               if (err > 0)
-                       spec->dig_in_nid = dig_nid;
+               dig_nid = codec->start_nid;
+               for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
+                       unsigned int wcaps = get_wcaps(codec, dig_nid);
+                       if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
+                               continue;
+                       if (!(wcaps & AC_WCAP_DIGITAL))
+                               continue;
+                       if (!(wcaps & AC_WCAP_CONN_LIST))
+                               continue;
+                       err = get_connection_index(codec, dig_nid,
+                                                  spec->autocfg.dig_in_pin);
+                       if (err >= 0) {
+                               spec->dig_in_nid = dig_nid;
+                               break;
+                       }
+               }
        }
 }
 
index 289cb4dacfc79ec012b6efb174054efaa0afe855..6c0a11adb2a84511b83b23b69d5efce9fb2085c6 100644 (file)
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
                chip->model.suspend = claro_suspend;
                chip->model.resume = claro_resume;
                chip->model.set_adc_params = set_ak5385_params;
+               chip->model.device_config = PLAYBACK_0_TO_I2S |
+                                           PLAYBACK_1_TO_SPDIF |
+                                           CAPTURE_0_FROM_I2S_2 |
+                                           CAPTURE_1_FROM_SPDIF;
                break;
        }
        if (id->driver_data == MODEL_MERIDIAN ||
index b92adef8e81e7ae61eef5681925610b87f03a502..d6fa7bfd9aa123d7f8bb8142a8def39888129434 100644 (file)
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
                if (err < 0)
                        return err;
 
+               memset(&info, 0, sizeof(info));
                spin_lock_irqsave(&hdsp->lock, flags);
                info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
                info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
index 547b713d720449a7bdd746ca04fbf5a6cbe84931..0c98ef9156d8fd919f81711fe7b798be7c0ca7d8 100644 (file)
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
 
        case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
 
+               memset(&info, 0, sizeof(info));
                spin_lock_irq(&hdspm->lock);
                info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
                info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
index b823a5c9b9bc81d81b8f64f4847483eae3f253bc..87e2b7fcbf176d9f429506b285dcc2f6ac05d7bc 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 
+#include <asm/clkdev.h>
 #include <asm/clock.h>
 
 #include <cpu/sh7722.h>
@@ -40,12 +41,12 @@ static struct clk_ops siumckb_clk_ops = {
 };
 
 static struct clk siumckb_clk = {
-       .name           = "siumckb_clk",
-       .id             = -1,
        .ops            = &siumckb_clk_ops,
        .rate           = 0, /* initialised at run-time */
 };
 
+static struct clk_lookup *siumckb_lookup;
+
 static int migor_hw_params(struct snd_pcm_substream *substream,
                           struct snd_pcm_hw_params *params)
 {
@@ -180,6 +181,13 @@ static int __init migor_init(void)
        if (ret < 0)
                return ret;
 
+       siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
+       if (!siumckb_lookup) {
+               ret = -ENOMEM;
+               goto eclkdevalloc;
+       }
+       clkdev_add(siumckb_lookup);
+
        /* Port number used on this machine: port B */
        migor_snd_device = platform_device_alloc("soc-audio", 1);
        if (!migor_snd_device) {
@@ -200,12 +208,15 @@ static int __init migor_init(void)
 epdevadd:
        platform_device_put(migor_snd_device);
 epdevalloc:
+       clkdev_drop(siumckb_lookup);
+eclkdevalloc:
        clk_unregister(&siumckb_clk);
        return ret;
 }
 
 static void __exit migor_exit(void)
 {
+       clkdev_drop(siumckb_lookup);
        clk_unregister(&siumckb_clk);
        platform_device_unregister(migor_snd_device);
 }
index adbc68ce90508221cc919121fc19e062fd309f44..f6b0d2829ea96d438d1e84550272c6a7bda80b4c 100644 (file)
@@ -203,8 +203,9 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
        data[1] = (value >> 8) & 0xff;
        data[2] = value & 0xff;
 
-       if (!snd_soc_codec_volatile_register(codec, reg))
-               reg_cache[reg] = value;
+       if (!snd_soc_codec_volatile_register(codec, reg)
+               && reg < codec->reg_cache_size)
+                       reg_cache[reg] = value;
 
        if (codec->cache_only) {
                codec->cache_sync = 1;
index 5164a655c39f60b578c8642f2caabe8af66fa016..b2c63309a65165b471822e99268c828bbdb07777 100644 (file)
@@ -8,7 +8,7 @@ perf-annotate - Read perf.data (created by perf record) and display annotated co
 SYNOPSIS
 --------
 [verse]
-'perf annotate' [-i <file> | --input=file] symbol_name
+'perf annotate' [-i <file> | --input=file] [symbol_name]
 
 DESCRIPTION
 -----------
@@ -24,6 +24,13 @@ OPTIONS
 --input=::
         Input file name. (default: perf.data)
 
+--stdio:: Use the stdio interface.
+
+--tui:: Use the TUI interface. Use of --tui requires a tty; if one is not
+       present, as when piping to other commands, the stdio interface is
+       used. This interface starts by centering on the line with the most
+       samples; TAB/UNTAB cycles through the lines with the most samples.
+
 SEE ALSO
 --------
-linkperf:perf-record[1]
+linkperf:perf-record[1], linkperf:perf-report[1]
index abfabe9147a4f2a48b6fd47bfdb758de2a3f3eea..12052c9ed0babfc3a1c93cc01758ec3b7747ee10 100644 (file)
@@ -65,6 +65,13 @@ OPTIONS
                 the tree is considered as a new profiled object. +
        Default: fractal,0.5.
 
+--stdio:: Use the stdio interface.
+
+--tui:: Use the TUI interface, which is integrated with annotate and allows
+        zooming into DSOs or threads, among other features. Use of --tui
+       requires a tty; if one is not present, as when piping to other
+       commands, the stdio interface is used.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1]
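
Both option descriptions above document the same fallback: --tui is honoured only when a tty is available, otherwise the stdio interface is used. A minimal sketch of that selection policy (illustrative only; in perf itself the flags set use_browser and setup_browser() performs the tty check):

    import sys

    def pick_interface(use_stdio, use_tui):
        if use_stdio:                        # --stdio always selects the text interface
            return "stdio"
        if use_tui and sys.stdout.isatty():
            return "tui"
        return "stdio"                       # no tty (e.g. piped output): fall back
        # (what happens when neither flag is given is perf's default and is not modelled here)
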
index 4f1fa77c1feb0b7a854ab8a85bd21682cbc66377..d1db0f676a4bf14850fa0264e78fe3d482d376dc 100644 (file)
@@ -313,6 +313,9 @@ TEST_PROGRAMS =
 
 SCRIPT_SH += perf-archive.sh
 
+grep-libs = $(filter -l%,$(1))
+strip-libs = $(filter-out -l%,$(1))
+
 #
 # No Perl scripts right now:
 #
@@ -588,14 +591,17 @@ endif
 ifdef NO_LIBPERL
        BASIC_CFLAGS += -DNO_LIBPERL
 else
-       PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
+       PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
+       PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
+       PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
        PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
        FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
        ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED)),y)
                BASIC_CFLAGS += -DNO_LIBPERL
        else
-               ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
+               ALL_LDFLAGS += $(PERL_EMBED_LDFLAGS)
+               EXTLIBS += $(PERL_EMBED_LIBADD)
                LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
                LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
        endif
@@ -604,13 +610,16 @@ endif
 ifdef NO_LIBPYTHON
        BASIC_CFLAGS += -DNO_LIBPYTHON
 else
-       PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
+       PYTHON_EMBED_LDOPTS = $(shell python-config --ldflags 2>/dev/null)
+       PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
+       PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
        PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
        FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
        ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y)
                BASIC_CFLAGS += -DNO_LIBPYTHON
        else
-               ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
+               ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
+               EXTLIBS += $(PYTHON_EMBED_LIBADD)
                LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
                LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
        endif
@@ -653,6 +662,15 @@ else
        endif
 endif
 
+
+ifdef NO_STRLCPY
+       BASIC_CFLAGS += -DNO_STRLCPY
+else
+       ifneq ($(call try-cc,$(SOURCE_STRLCPY),),y)
+               BASIC_CFLAGS += -DNO_STRLCPY
+       endif
+endif
+
 ifndef CC_LD_DYNPATH
        ifdef NO_R_TO_GCC_LINKER
                # Some gcc does not accept and pass -R to the linker to specify
@@ -910,8 +928,8 @@ $(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
                $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
 
 $(OUTPUT)perf$X: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
-       $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(OUTPUT)perf.o \
-               $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS)
+       $(QUIET_LINK)$(CC) $(ALL_CFLAGS) $(ALL_LDFLAGS) $(OUTPUT)perf.o \
+               $(BUILTIN_OBJS) $(LIBS) -o $@
 
 $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
@@ -1017,7 +1035,7 @@ builtin-revert.o wt-status.o: wt-status.h
 # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So
 # we depend the various files onto their directories.
 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
+$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
 # In the second step, we make a rule to actually create these directories
 $(sort $(dir $(DIRECTORY_DEPS))):
        $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
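
The grep-libs/strip-libs helpers introduced at the top of this Makefile hunk split the output of perl -MExtUtils::Embed -e ldopts and python-config --ldflags into the -l libraries (added to EXTLIBS, which end up at the end of the link line) and the remaining linker flags (kept in ALL_LDFLAGS), matching the reordered link rule for $(OUTPUT)perf$X. A small sketch of the same split (the flag string is a made-up example):

    opts = "-Wl,-E -L/usr/lib/perl/CORE -lperl -ldl -lm -lcrypt -lc".split()
    extlibs = [o for o in opts if o.startswith("-l")]       # like $(filter -l%,...)
    ldflags = [o for o in opts if not o.startswith("-l")]   # like $(filter-out -l%,...)
    print(extlibs, ldflags)
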
index 1478dc64bf157fc0f226cba51eef80ec8646c15f..6d5604d8df9599acb55d87017f5d58e19d906395 100644 (file)
@@ -28,7 +28,7 @@
 
 static char            const *input_name = "perf.data";
 
-static bool            force;
+static bool            force, use_tui, use_stdio;
 
 static bool            full_paths;
 
@@ -321,7 +321,7 @@ static int hist_entry__tty_annotate(struct hist_entry *he)
 
 static void hists__find_annotations(struct hists *self)
 {
-       struct rb_node *first = rb_first(&self->entries), *nd = first;
+       struct rb_node *nd = rb_first(&self->entries), *next;
        int key = KEY_RIGHT;
 
        while (nd) {
@@ -343,20 +343,19 @@ find_next:
 
                if (use_browser > 0) {
                        key = hist_entry__tui_annotate(he);
-                       if (is_exit_key(key))
-                               break;
                        switch (key) {
                        case KEY_RIGHT:
-                       case '\t':
-                               nd = rb_next(nd);
+                               next = rb_next(nd);
                                break;
                        case KEY_LEFT:
-                               if (nd == first)
-                                       continue;
-                               nd = rb_prev(nd);
-                       default:
+                               next = rb_prev(nd);
                                break;
+                       default:
+                               return;
                        }
+
+                       if (next != NULL)
+                               nd = next;
                } else {
                        hist_entry__tty_annotate(he);
                        nd = rb_next(nd);
@@ -428,6 +427,8 @@ static const struct option options[] = {
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
+       OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
+       OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
        OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
@@ -443,6 +444,11 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
 {
        argc = parse_options(argc, argv, options, annotate_usage, 0);
 
+       if (use_stdio)
+               use_browser = 0;
+       else if (use_tui)
+               use_browser = 1;
+
        setup_browser();
 
        symbol_conf.priv_size = sizeof(struct sym_priv);
index 55fc1f46892a6a920411db7dc91226bcbe6a7f82..5de405d452300318541338293563d8ebc41ccb87 100644 (file)
@@ -32,7 +32,7 @@
 
 static char            const *input_name = "perf.data";
 
-static bool            force;
+static bool            force, use_tui, use_stdio;
 static bool            hide_unresolved;
 static bool            dont_use_callchains;
 
@@ -107,7 +107,8 @@ static int perf_session__add_hist_entry(struct perf_session *self,
                goto out_free_syms;
        err = 0;
        if (symbol_conf.use_callchain) {
-               err = append_chain(he->callchain, data->callchain, syms, data->period);
+               err = callchain_append(he->callchain, data->callchain, syms,
+                                      data->period);
                if (err)
                        goto out_free_syms;
        }
@@ -450,6 +451,8 @@ static const struct option options[] = {
                    "Show per-thread event counters"),
        OPT_STRING(0, "pretty", &pretty_printing_style, "key",
                   "pretty printing style key: normal raw"),
+       OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
+       OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
                   "sort by key(s): pid, comm, dso, symbol, parent"),
        OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -482,8 +485,15 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
 {
        argc = parse_options(argc, argv, options, report_usage, 0);
 
+       if (use_stdio)
+               use_browser = 0;
+       else if (use_tui)
+               use_browser = 1;
+
        if (strcmp(input_name, "-") != 0)
                setup_browser();
+       else
+               use_browser = 0;
        /*
         * Only in the newt browser we are doing integrated annotation,
         * so don't allocate extra space that won't be used in the stdio
index 7a7b6085905382c791834b0c0f35ed5f39658e26..b253db634f04b7e8ddfddd1cc33bb3ce8343a49a 100644 (file)
@@ -110,6 +110,17 @@ int main(void)
 }
 endef
 
+define SOURCE_STRLCPY
+#include <stdlib.h>
+extern size_t strlcpy(char *dest, const char *src, size_t size);
+
+int main(void)
+{
+       strlcpy(NULL, NULL, 0);
+       return 0;
+}
+endef
+
 # try-cc
 # Usage: option = $(call try-cc, source-to-build, cc-options)
 try-cc = $(shell sh -c                                           \
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
new file mode 100644 (file)
index 0000000..d931a82
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+perf record -a -e net:net_dev_xmit -e net:net_dev_queue                \
+               -e net:netif_receive_skb -e net:netif_rx                \
+               -e skb:consume_skb -e skb:kfree_skb                     \
+               -e skb:skb_copy_datagram_iovec -e napi:napi_poll        \
+               -e irq:irq_handler_entry -e irq:irq_handler_exit        \
+               -e irq:softirq_entry -e irq:softirq_exit                \
+               -e irq:softirq_raise $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-report b/tools/perf/scripts/python/bin/netdev-times-report
new file mode 100644 (file)
index 0000000..c3d0a63
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash
+# description: display the flow of packets and their processing time
+# args: [tx] [rx] [dev=] [debug]
+
+perf trace -s ~/libexec/perf-core/scripts/python/netdev-times.py $@
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
new file mode 100644 (file)
index 0000000..9aa0a32
--- /dev/null
@@ -0,0 +1,464 @@
+# Display the flow of packets and their processing time.
+# It helps to investigate networking and network device behaviour.
+#
+# options
+# tx: show only the tx chart
+# rx: show only the rx chart
+# dev=: show only events related to the specified device
+# debug: run in debug mode. It shows buffer status.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+       '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+all_event_list = []; # all tracepoint events related to this script are collected here
+irq_dic = {}; # key is cpu and value is a list which stacks irqs
+              # which raise the NET_RX softirq
+net_rx_dic = {}; # key is cpu and value includes the time of NET_RX softirq entry
+                # and a list which stacks receive events
+receive_hunk_list = []; # a list which includes sequences of receive events
+rx_skb_list = []; # received packet list for matching
+                      # skb_copy_datagram_iovec
+
+buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
+                      # tx_xmit_list
+of_count_rx_skb_list = 0; # overflow count
+
+tx_queue_list = []; # list of packets which pass through dev_queue_xmit
+of_count_tx_queue_list = 0; # overflow count
+
+tx_xmit_list = [];  # list of packets which pass through dev_hard_start_xmit
+of_count_tx_xmit_list = 0; # overflow count
+
+tx_free_list = [];  # list of packets which are freed
+
+# options
+show_tx = 0;
+show_rx = 0;
+dev = 0; # store a name of device specified by option "dev="
+debug = 0;
+
+# indices of event_info tuple
+EINFO_IDX_NAME=   0
+EINFO_IDX_CONTEXT=1
+EINFO_IDX_CPU=    2
+EINFO_IDX_TIME=   3
+EINFO_IDX_PID=    4
+EINFO_IDX_COMM=   5
+
+# Calculate a time interval(msec) from src(nsec) to dst(nsec)
+def diff_msec(src, dst):
+       return (dst - src) / 1000000.0
+
+# Display the process of transmitting a packet
+def print_transmit(hunk):
+       if dev != 0 and hunk['dev'].find(dev) < 0:
+               return
+       print "%7s %5d %6d.%06dsec %12.3fmsec      %12.3fmsec" % \
+               (hunk['dev'], hunk['len'],
+               nsecs_secs(hunk['queue_t']),
+               nsecs_nsecs(hunk['queue_t'])/1000,
+               diff_msec(hunk['queue_t'], hunk['xmit_t']),
+               diff_msec(hunk['xmit_t'], hunk['free_t']))
+
+# Format for displaying rx packet processing
+PF_IRQ_ENTRY= "  irq_entry(+%.3fmsec irq=%d:%s)"
+PF_SOFT_ENTRY="  softirq_entry(+%.3fmsec)"
+PF_NAPI_POLL= "  napi_poll_exit(+%.3fmsec %s)"
+PF_JOINT=     "         |"
+PF_WJOINT=    "         |            |"
+PF_NET_RECV=  "         |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
+PF_NET_RX=    "         |---netif_rx(+%.3fmsec skb=%x)"
+PF_CPY_DGRAM= "         |      skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
+PF_KFREE_SKB= "         |      kfree_skb(+%.3fmsec location=%x)"
+PF_CONS_SKB=  "         |      consume_skb(+%.3fmsec)"
+
+# Display the process of received packets and interrupts associated with
+# a NET_RX softirq
+def print_receive(hunk):
+       show_hunk = 0
+       irq_list = hunk['irq_list']
+       cpu = irq_list[0]['cpu']
+       base_t = irq_list[0]['irq_ent_t']
+       # check if this hunk should be shown
+       if dev != 0:
+               for i in range(len(irq_list)):
+                       if irq_list[i]['name'].find(dev) >= 0:
+                               show_hunk = 1
+                               break
+       else:
+               show_hunk = 1
+       if show_hunk == 0:
+               return
+
+       print "%d.%06dsec cpu=%d" % \
+               (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
+       for i in range(len(irq_list)):
+               print PF_IRQ_ENTRY % \
+                       (diff_msec(base_t, irq_list[i]['irq_ent_t']),
+                       irq_list[i]['irq'], irq_list[i]['name'])
+               print PF_JOINT
+               irq_event_list = irq_list[i]['event_list']
+               for j in range(len(irq_event_list)):
+                       irq_event = irq_event_list[j]
+                       if irq_event['event'] == 'netif_rx':
+                               print PF_NET_RX % \
+                                       (diff_msec(base_t, irq_event['time']),
+                                       irq_event['skbaddr'])
+                               print PF_JOINT
+       print PF_SOFT_ENTRY % \
+               diff_msec(base_t, hunk['sirq_ent_t'])
+       print PF_JOINT
+       event_list = hunk['event_list']
+       for i in range(len(event_list)):
+               event = event_list[i]
+               if event['event_name'] == 'napi_poll':
+                       print PF_NAPI_POLL % \
+                           (diff_msec(base_t, event['event_t']), event['dev'])
+                       if i == len(event_list) - 1:
+                               print ""
+                       else:
+                               print PF_JOINT
+               else:
+                       print PF_NET_RECV % \
+                           (diff_msec(base_t, event['event_t']), event['skbaddr'],
+                               event['len'])
+                       if 'comm' in event.keys():
+                               print PF_WJOINT
+                               print PF_CPY_DGRAM % \
+                                       (diff_msec(base_t, event['comm_t']),
+                                       event['pid'], event['comm'])
+                       elif 'handle' in event.keys():
+                               print PF_WJOINT
+                               if event['handle'] == "kfree_skb":
+                                       print PF_KFREE_SKB % \
+                                               (diff_msec(base_t,
+                                               event['comm_t']),
+                                               event['location'])
+                               elif event['handle'] == "consume_skb":
+                                       print PF_CONS_SKB % \
+                                               diff_msec(base_t,
+                                                       event['comm_t'])
+                       print PF_JOINT
+
+def trace_begin():
+       global show_tx
+       global show_rx
+       global dev
+       global debug
+
+       for i in range(len(sys.argv)):
+               if i == 0:
+                       continue
+               arg = sys.argv[i]
+               if arg == 'tx':
+                       show_tx = 1
+               elif arg =='rx':
+                       show_rx = 1
+               elif arg.find('dev=',0, 4) >= 0:
+                       dev = arg[4:]
+               elif arg == 'debug':
+                       debug = 1
+       if show_tx == 0  and show_rx == 0:
+               show_tx = 1
+               show_rx = 1
+
+def trace_end():
+       # order all events in time
+       all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
+                                           b[EINFO_IDX_TIME]))
+       # process all events
+       for i in range(len(all_event_list)):
+               event_info = all_event_list[i]
+               name = event_info[EINFO_IDX_NAME]
+               if name == 'irq__softirq_exit':
+                       handle_irq_softirq_exit(event_info)
+               elif name == 'irq__softirq_entry':
+                       handle_irq_softirq_entry(event_info)
+               elif name == 'irq__softirq_raise':
+                       handle_irq_softirq_raise(event_info)
+               elif name == 'irq__irq_handler_entry':
+                       handle_irq_handler_entry(event_info)
+               elif name == 'irq__irq_handler_exit':
+                       handle_irq_handler_exit(event_info)
+               elif name == 'napi__napi_poll':
+                       handle_napi_poll(event_info)
+               elif name == 'net__netif_receive_skb':
+                       handle_netif_receive_skb(event_info)
+               elif name == 'net__netif_rx':
+                       handle_netif_rx(event_info)
+               elif name == 'skb__skb_copy_datagram_iovec':
+                       handle_skb_copy_datagram_iovec(event_info)
+               elif name == 'net__net_dev_queue':
+                       handle_net_dev_queue(event_info)
+               elif name == 'net__net_dev_xmit':
+                       handle_net_dev_xmit(event_info)
+               elif name == 'skb__kfree_skb':
+                       handle_kfree_skb(event_info)
+               elif name == 'skb__consume_skb':
+                       handle_consume_skb(event_info)
+       # display receive hunks
+       if show_rx:
+               for i in range(len(receive_hunk_list)):
+                       print_receive(receive_hunk_list[i])
+       # display transmit hunks
+       if show_tx:
+               print "   dev    len      Qdisc        " \
+                       "       netdevice             free"
+               for i in range(len(tx_free_list)):
+                       print_transmit(tx_free_list[i])
+       if debug:
+               print "debug buffer status"
+               print "----------------------------"
+               print "xmit Qdisc:remain:%d overflow:%d" % \
+                       (len(tx_queue_list), of_count_tx_queue_list)
+               print "xmit netdevice:remain:%d overflow:%d" % \
+                       (len(tx_xmit_list), of_count_tx_xmit_list)
+               print "receive:remain:%d overflow:%d" % \
+                       (len(rx_skb_list), of_count_rx_skb_list)
+
+# called from perf, when it finds a corresponding event
+def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
+       if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+               return
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+       all_event_list.append(event_info)
+
+def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
+       if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+               return
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+       all_event_list.append(event_info)
+
+def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
+       if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+               return
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+       all_event_list.append(event_info)
+
+def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
+                       irq, irq_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       irq, irq_name)
+       all_event_list.append(event_info)
+
+def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
+       all_event_list.append(event_info)
+
+def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       napi, dev_name)
+       all_event_list.append(event_info)
+
+def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+                       skblen, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen, dev_name)
+       all_event_list.append(event_info)
+
+def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+                       skblen, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen, dev_name)
+       all_event_list.append(event_info)
+
+def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
+                       skbaddr, skblen, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen, dev_name)
+       all_event_list.append(event_info)
+
+def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
+                       skbaddr, skblen, rc, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen, rc, dev_name)
+       all_event_list.append(event_info)
+
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+                       skbaddr, protocol, location):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, protocol, location)
+       all_event_list.append(event_info)
+
+def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr)
+       all_event_list.append(event_info)
+
+def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
+       skbaddr, skblen):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen)
+       all_event_list.append(event_info)
+
+def handle_irq_handler_entry(event_info):
+       (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
+       if cpu not in irq_dic.keys():
+               irq_dic[cpu] = []
+       irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
+       irq_dic[cpu].append(irq_record)
+
+def handle_irq_handler_exit(event_info):
+       (name, context, cpu, time, pid, comm, irq, ret) = event_info
+       if cpu not in irq_dic.keys():
+               return
+       irq_record = irq_dic[cpu].pop()
+       if irq != irq_record['irq']:
+               return
+       irq_record.update({'irq_ext_t':time})
+       # if an irq doesn't include NET_RX softirq, drop.
+       if 'event_list' in irq_record.keys():
+               irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_raise(event_info):
+       (name, context, cpu, time, pid, comm, vec) = event_info
+       if cpu not in irq_dic.keys() \
+       or len(irq_dic[cpu]) == 0:
+               return
+       irq_record = irq_dic[cpu].pop()
+       if 'event_list' in irq_record.keys():
+               irq_event_list = irq_record['event_list']
+       else:
+               irq_event_list = []
+       irq_event_list.append({'time':time, 'event':'sirq_raise'})
+       irq_record.update({'event_list':irq_event_list})
+       irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_entry(event_info):
+       (name, context, cpu, time, pid, comm, vec) = event_info
+       net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
+
+def handle_irq_softirq_exit(event_info):
+       (name, context, cpu, time, pid, comm, vec) = event_info
+       irq_list = []
+       event_list = 0
+       if cpu in irq_dic.keys():
+               irq_list = irq_dic[cpu]
+               del irq_dic[cpu]
+       if cpu in net_rx_dic.keys():
+               sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
+               event_list = net_rx_dic[cpu]['event_list']
+               del net_rx_dic[cpu]
+       if irq_list == [] or event_list == 0:
+               return
+       rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
+                   'irq_list':irq_list, 'event_list':event_list}
+       # merge information related to a NET_RX softirq
+       receive_hunk_list.append(rec_data)
+
+def handle_napi_poll(event_info):
+       (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
+       if cpu in net_rx_dic.keys():
+               event_list = net_rx_dic[cpu]['event_list']
+               rec_data = {'event_name':'napi_poll',
+                               'dev':dev_name, 'event_t':time}
+               event_list.append(rec_data)
+
+def handle_netif_rx(event_info):
+       (name, context, cpu, time, pid, comm,
+               skbaddr, skblen, dev_name) = event_info
+       if cpu not in irq_dic.keys() \
+       or len(irq_dic[cpu]) == 0:
+               return
+       irq_record = irq_dic[cpu].pop()
+       if 'event_list' in irq_record.keys():
+               irq_event_list = irq_record['event_list']
+       else:
+               irq_event_list = []
+       irq_event_list.append({'time':time, 'event':'netif_rx',
+               'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
+       irq_record.update({'event_list':irq_event_list})
+       irq_dic[cpu].append(irq_record)
+
+def handle_netif_receive_skb(event_info):
+       global of_count_rx_skb_list
+
+       (name, context, cpu, time, pid, comm,
+               skbaddr, skblen, dev_name) = event_info
+       if cpu in net_rx_dic.keys():
+               rec_data = {'event_name':'netif_receive_skb',
+                           'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+               event_list = net_rx_dic[cpu]['event_list']
+               event_list.append(rec_data)
+               rx_skb_list.insert(0, rec_data)
+               if len(rx_skb_list) > buffer_budget:
+                       rx_skb_list.pop()
+                       of_count_rx_skb_list += 1
+
+def handle_net_dev_queue(event_info):
+       global of_count_tx_queue_list
+
+       (name, context, cpu, time, pid, comm,
+               skbaddr, skblen, dev_name) = event_info
+       skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
+       tx_queue_list.insert(0, skb)
+       if len(tx_queue_list) > buffer_budget:
+               tx_queue_list.pop()
+               of_count_tx_queue_list += 1
+
+def handle_net_dev_xmit(event_info):
+       global of_count_tx_xmit_list
+
+       (name, context, cpu, time, pid, comm,
+               skbaddr, skblen, rc, dev_name) = event_info
+       if rc == 0: # NETDEV_TX_OK
+               for i in range(len(tx_queue_list)):
+                       skb = tx_queue_list[i]
+                       if skb['skbaddr'] == skbaddr:
+                               skb['xmit_t'] = time
+                               tx_xmit_list.insert(0, skb)
+                               del tx_queue_list[i]
+                               if len(tx_xmit_list) > buffer_budget:
+                                       tx_xmit_list.pop()
+                                       of_count_tx_xmit_list += 1
+                               return
+
+def handle_kfree_skb(event_info):
+       (name, context, cpu, time, pid, comm,
+               skbaddr, protocol, location) = event_info
+       for i in range(len(tx_queue_list)):
+               skb = tx_queue_list[i]
+               if skb['skbaddr'] == skbaddr:
+                       del tx_queue_list[i]
+                       return
+       for i in range(len(tx_xmit_list)):
+               skb = tx_xmit_list[i]
+               if skb['skbaddr'] == skbaddr:
+                       skb['free_t'] = time
+                       tx_free_list.append(skb)
+                       del tx_xmit_list[i]
+                       return
+       for i in range(len(rx_skb_list)):
+               rec_data = rx_skb_list[i]
+               if rec_data['skbaddr'] == skbaddr:
+                       rec_data.update({'handle':"kfree_skb",
+                                       'comm':comm, 'pid':pid, 'comm_t':time})
+                       del rx_skb_list[i]
+                       return
+
+def handle_consume_skb(event_info):
+       (name, context, cpu, time, pid, comm, skbaddr) = event_info
+       for i in range(len(tx_xmit_list)):
+               skb = tx_xmit_list[i]
+               if skb['skbaddr'] == skbaddr:
+                       skb['free_t'] = time
+                       tx_free_list.append(skb)
+                       del tx_xmit_list[i]
+                       return
+
+def handle_skb_copy_datagram_iovec(event_info):
+       (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
+       for i in range(len(rx_skb_list)):
+               rec_data = rx_skb_list[i]
+               if skbaddr == rec_data['skbaddr']:
+                       rec_data.update({'handle':"skb_copy_datagram_iovec",
+                                       'comm':comm, 'pid':pid, 'comm_t':time})
+                       del rx_skb_list[i]
+                       return
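
The transmit side of the script above follows each packet by matching skbaddr through three lists: net_dev_queue inserts a record into tx_queue_list, net_dev_xmit moves it to tx_xmit_list, and kfree_skb/consume_skb moves it to tx_free_list with queue, xmit and free timestamps filled in, which print_transmit() then turns into per-stage latencies. A toy sketch of that matching (hypothetical addresses and timestamps, not part of the script):

    # Toy version of the skbaddr matching used by the transmit handlers above.
    queued, transmitted, freed = [], [], []

    def on_queue(skbaddr, t):
        queued.insert(0, {'skbaddr': skbaddr, 'queue_t': t})

    def on_xmit(skbaddr, t):
        for i, skb in enumerate(queued):
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = t
                transmitted.insert(0, skb)
                del queued[i]
                return

    def on_free(skbaddr, t):
        for i, skb in enumerate(transmitted):
            if skb['skbaddr'] == skbaddr:
                skb['free_t'] = t
                freed.append(skb)
                del transmitted[i]
                return

    on_queue(0xdeadbeef, 1000000)
    on_xmit(0xdeadbeef, 1250000)
    on_free(0xdeadbeef, 3000000)
    print(freed)  # one record carrying queue_t, xmit_t and free_t
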
index 27e9ebe4076e0efbf4117a46f2dbbcc74a1dcc3e..a7729797fd96254bc35326077337a71f919c19b5 100644 (file)
@@ -82,6 +82,8 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
 extern char *perf_pathdup(const char *fmt, ...)
        __attribute__((format (printf, 1, 2)));
 
+#ifdef NO_STRLCPY
 extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif
 
 #endif /* __PERF_CACHE_H */
index f231f43424d27930a286cb52902c21cb4534a068..e12d539417b2cc4644e2d5a919cfb8a23e8ae163 100644 (file)
@@ -28,6 +28,9 @@ bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
 #define chain_for_each_child(child, parent)    \
        list_for_each_entry(child, &parent->children, brothers)
 
+#define chain_for_each_child_safe(child, next, parent) \
+       list_for_each_entry_safe(child, next, &parent->children, brothers)
+
 static void
 rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
                    enum chain_mode mode)
@@ -86,10 +89,10 @@ __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
  * sort them by hit
  */
 static void
-sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
+sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
                u64 min_hit, struct callchain_param *param __used)
 {
-       __sort_chain_flat(rb_root, node, min_hit);
+       __sort_chain_flat(rb_root, &root->node, min_hit);
 }
 
 static void __sort_chain_graph_abs(struct callchain_node *node,
@@ -108,11 +111,11 @@ static void __sort_chain_graph_abs(struct callchain_node *node,
 }
 
 static void
-sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_node *chain_root,
+sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
                     u64 min_hit, struct callchain_param *param __used)
 {
-       __sort_chain_graph_abs(chain_root, min_hit);
-       rb_root->rb_node = chain_root->rb_root.rb_node;
+       __sort_chain_graph_abs(&chain_root->node, min_hit);
+       rb_root->rb_node = chain_root->node.rb_root.rb_node;
 }
 
 static void __sort_chain_graph_rel(struct callchain_node *node,
@@ -133,11 +136,11 @@ static void __sort_chain_graph_rel(struct callchain_node *node,
 }
 
 static void
-sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_node *chain_root,
+sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
                     u64 min_hit __used, struct callchain_param *param)
 {
-       __sort_chain_graph_rel(chain_root, param->min_percent / 100.0);
-       rb_root->rb_node = chain_root->rb_root.rb_node;
+       __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
+       rb_root->rb_node = chain_root->node.rb_root.rb_node;
 }
 
 int register_callchain_param(struct callchain_param *param)
@@ -284,19 +287,18 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
 }
 
 static int
-__append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start, u64 period);
+append_chain(struct callchain_node *root, struct resolved_chain *chain,
+            unsigned int start, u64 period);
 
 static void
-__append_chain_children(struct callchain_node *root,
-                       struct resolved_chain *chain,
-                       unsigned int start, u64 period)
+append_chain_children(struct callchain_node *root, struct resolved_chain *chain,
+                     unsigned int start, u64 period)
 {
        struct callchain_node *rnode;
 
        /* lookup in childrens */
        chain_for_each_child(rnode, root) {
-               unsigned int ret = __append_chain(rnode, chain, start, period);
+               unsigned int ret = append_chain(rnode, chain, start, period);
 
                if (!ret)
                        goto inc_children_hit;
@@ -309,8 +311,8 @@ inc_children_hit:
 }
 
 static int
-__append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start, u64 period)
+append_chain(struct callchain_node *root, struct resolved_chain *chain,
+            unsigned int start, u64 period)
 {
        struct callchain_list *cnode;
        unsigned int i = start;
@@ -357,7 +359,7 @@ __append_chain(struct callchain_node *root, struct resolved_chain *chain,
        }
 
        /* We match the node and still have a part remaining */
-       __append_chain_children(root, chain, i, period);
+       append_chain_children(root, chain, i, period);
 
        return 0;
 }
@@ -380,8 +382,8 @@ static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
 }
 
 
-int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms, u64 period)
+int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
+                    struct map_symbol *syms, u64 period)
 {
        struct resolved_chain *filtered;
 
@@ -398,9 +400,65 @@ int append_chain(struct callchain_node *root, struct ip_callchain *chain,
        if (!filtered->nr)
                goto end;
 
-       __append_chain_children(root, filtered, 0, period);
+       append_chain_children(&root->node, filtered, 0, period);
+
+       if (filtered->nr > root->max_depth)
+               root->max_depth = filtered->nr;
 end:
        free(filtered);
 
        return 0;
 }
+
+static int
+merge_chain_branch(struct callchain_node *dst, struct callchain_node *src,
+                  struct resolved_chain *chain)
+{
+       struct callchain_node *child, *next_child;
+       struct callchain_list *list, *next_list;
+       int old_pos = chain->nr;
+       int err = 0;
+
+       list_for_each_entry_safe(list, next_list, &src->val, list) {
+               chain->ips[chain->nr].ip = list->ip;
+               chain->ips[chain->nr].ms = list->ms;
+               chain->nr++;
+               list_del(&list->list);
+               free(list);
+       }
+
+       if (src->hit)
+               append_chain_children(dst, chain, 0, src->hit);
+
+       chain_for_each_child_safe(child, next_child, src) {
+               err = merge_chain_branch(dst, child, chain);
+               if (err)
+                       break;
+
+               list_del(&child->brothers);
+               free(child);
+       }
+
+       chain->nr = old_pos;
+
+       return err;
+}
+
+int callchain_merge(struct callchain_root *dst, struct callchain_root *src)
+{
+       struct resolved_chain *chain;
+       int err;
+
+       chain = malloc(sizeof(*chain) +
+                      src->max_depth * sizeof(struct resolved_ip));
+       if (!chain)
+               return -ENOMEM;
+
+       chain->nr = 0;
+
+       err = merge_chain_branch(&dst->node, &src->node, chain);
+
+       free(chain);
+
+       return err;
+}
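
merge_chain_branch() flattens one source node at a time into the shared
resolved_chain scratch buffer, appends that prefix into the destination tree at
the node's own hit count, recurses into the children, and only then rewinds
chain->nr to old_pos, so sibling branches never see each other's frames. A
comment-only sketch of that invariant for a hypothetical two-branch tree
(single-frame nodes A, B, C are illustrative):

	/*
	 * src:  A -> B (hit 3)
	 *        \-> C (hit 5)
	 *
	 * merge_chain_branch(dst, A, chain):
	 *   push A's frames              chain = [A]
	 *   append into dst with hit(A)  only if A->hit != 0
	 *   recurse into B:              chain = [A, B], append period 3, rewind to [A]
	 *   recurse into C:              chain = [A, C], append period 5, rewind to [A]
	 *   rewind to old_pos            chain = []
	 */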
index 6de4313924fb5c510f354cb3a8e2a73061e103f6..c15fb8c24ad2b87388e97cd6346cfdebaac11dd5 100644 (file)
@@ -26,9 +26,14 @@ struct callchain_node {
        u64                     children_hit;
 };
 
+struct callchain_root {
+       u64                     max_depth;
+       struct callchain_node   node;
+};
+
 struct callchain_param;
 
-typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_node *,
+typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
                                 u64, struct callchain_param *);
 
 struct callchain_param {
@@ -44,15 +49,16 @@ struct callchain_list {
        struct list_head        list;
 };
 
-static inline void callchain_init(struct callchain_node *node)
+static inline void callchain_init(struct callchain_root *root)
 {
-       INIT_LIST_HEAD(&node->brothers);
-       INIT_LIST_HEAD(&node->children);
-       INIT_LIST_HEAD(&node->val);
+       INIT_LIST_HEAD(&root->node.brothers);
+       INIT_LIST_HEAD(&root->node.children);
+       INIT_LIST_HEAD(&root->node.val);
 
-       node->children_hit = 0;
-       node->parent = NULL;
-       node->hit = 0;
+       root->node.parent = NULL;
+       root->node.hit = 0;
+       root->node.children_hit = 0;
+       root->max_depth = 0;
 }
 
 static inline u64 cumul_hits(struct callchain_node *node)
@@ -61,8 +67,9 @@ static inline u64 cumul_hits(struct callchain_node *node)
 }
 
 int register_callchain_param(struct callchain_param *param);
-int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms, u64 period);
+int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
+                    struct map_symbol *syms, u64 period);
+int callchain_merge(struct callchain_root *dst, struct callchain_root *src);
 
 bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
 #endif /* __PERF_CALLCHAIN_H */
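
The new struct callchain_root keeps the tree's maximum depth next to the root
node, so callchain_merge() can size its resolved_chain scratch buffer without
re-walking the source tree. A minimal caller-side sketch of the renamed API
(the helper name is illustrative; the functions are the ones declared above,
and the merge step mirrors the collapse__insert_entry() hunk further below):

	/* record one sample's chain, then fold a duplicate entry's tree
	 * into the surviving one */
	static int record_then_collapse(struct callchain_root *kept,
					struct callchain_root *dup,
					struct ip_callchain *chain,
					struct map_symbol *syms, u64 period)
	{
		callchain_init(kept);		/* empty lists, max_depth = 0 */

		if (callchain_append(kept, chain, syms, period) < 0)
			return -1;

		/* duplicate hist_entry found: merge its whole tree into kept */
		return callchain_merge(kept, dup);
	}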
index be22ae6ef0558009c0a1faaa4f55bcf2c5d828ca..2022e87409942ca4b0d133c3f889e41178a663d1 100644 (file)
@@ -87,7 +87,7 @@ static void hist_entry__add_cpumode_period(struct hist_entry *self,
 
 static struct hist_entry *hist_entry__new(struct hist_entry *template)
 {
-       size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
+       size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
 
        if (self != NULL) {
@@ -226,6 +226,8 @@ static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
 
                if (!cmp) {
                        iter->period += he->period;
+                       if (symbol_conf.use_callchain)
+                               callchain_merge(iter->callchain, he->callchain);
                        hist_entry__free(he);
                        return false;
                }
index 58a470d036dd0917c16eda49fb8b1987703ca7b5..bd74977114242ff465af39a291d30aa7d463f3b2 100644 (file)
@@ -22,6 +22,7 @@ static const char *get_perf_dir(void)
        return ".";
 }
 
+#ifdef NO_STRLCPY
 size_t strlcpy(char *dest, const char *src, size_t size)
 {
        size_t ret = strlen(src);
@@ -33,7 +34,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
        }
        return ret;
 }
-
+#endif
 
 static char *get_pathname(void)
 {
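
Guarding the local strlcpy() with NO_STRLCPY lets the build fall back to the
libc implementation where one exists instead of colliding with it. The
prototype needs the same guard; a sketch, assuming NO_STRLCPY is supplied by
the build system only when libc lacks strlcpy (that detection is not part of
this hunk):

	#ifdef NO_STRLCPY
	/* fallback, compiled only when libc does not provide strlcpy */
	size_t strlcpy(char *dest, const char *src, size_t size);
	#endif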
index 46e531d09e8bfcbe1064a4307ce5a75ef72a6405..0b91053a7d11af888eea81a4c8de24fdd60ce6f8 100644 (file)
@@ -70,7 +70,7 @@ struct hist_entry {
                struct hist_entry *pair;
                struct rb_root    sorted_chain;
        };
-       struct callchain_node   callchain[0];
+       struct callchain_root   callchain[0];
 };
 
 enum sort_type {
index b2f5ae97f33dded1cdaba3e843ab2df888515777..b39f499e575a604198bf1bb11d11d6280a091548 100644 (file)
@@ -388,6 +388,20 @@ size_t dso__fprintf_buildid(struct dso *self, FILE *fp)
        return fprintf(fp, "%s", sbuild_id);
 }
 
+size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp)
+{
+       size_t ret = 0;
+       struct rb_node *nd;
+       struct symbol_name_rb_node *pos;
+
+       for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) {
+               pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
+               fprintf(fp, "%s\n", pos->sym.name);
+       }
+
+       return ret;
+}
+
 size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
 {
        struct rb_node *nd;
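
The new dso__fprintf_symbols_by_name() walks the DSO's name-sorted rb-tree
rather than the address-sorted one; note that, as written above, ret is never
accumulated, so the returned byte count is always zero even though the names
are printed. A one-line usage sketch (map obtained elsewhere; MAP__FUNCTION
selects the text symbols):

	/* dump every function symbol of a DSO, ordered by name */
	dso__fprintf_symbols_by_name(map->dso, MAP__FUNCTION, stdout);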
index ea95c2756f05e73d09f6868f24b174ed1221f59a..038f2201ee09579ca3f460d9f59576770ea477d2 100644 (file)
@@ -182,6 +182,7 @@ size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp);
 size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits);
 
 size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
+size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp);
 size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
 
 enum dso_origin {
index 7ea983acfaea521a425f7eb33250e319b0a3a960..f7af2fca965d5c206973f73ccd25b8b3607e02e5 100644 (file)
@@ -97,7 +97,7 @@ void setup_python_scripting(void)
        register_python_scripting(&python_scripting_unsupported_ops);
 }
 #else
-struct scripting_ops python_scripting_ops;
+extern struct scripting_ops python_scripting_ops;
 
 void setup_python_scripting(void)
 {
@@ -158,7 +158,7 @@ void setup_perl_scripting(void)
        register_perl_scripting(&perl_scripting_unsupported_ops);
 }
 #else
-struct scripting_ops perl_scripting_ops;
+extern struct scripting_ops perl_scripting_ops;
 
 void setup_perl_scripting(void)
 {
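
Adding extern here turns these lines into pure declarations of the objects
defined in the scripting backends; without it, a file-scope struct object with
no initializer is a C tentative definition, i.e. a second (common) definition
of the same symbol, which toolchains building with -fno-common reject. A
minimal sketch of the pattern (names are made up):

	/* backend.c: the one real definition */
	struct ops { const char *name; };
	struct ops backend_ops = { .name = "backend" };

	/* frontend.c: refer to it without defining it again */
	extern struct ops backend_ops;	/* declaration only */
	/* struct ops backend_ops; */	/* tentative definition: duplicate (common) symbol */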
index 66f2d583d8c4326971dc9d37cd47df57eeb82306..6d0df809a2edab24f28af4bea093d1cda3c2614d 100644 (file)
@@ -1,16 +1,6 @@
-#define _GNU_SOURCE
-#include <stdio.h>
-#undef _GNU_SOURCE
-/*
- * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
- * the build if it isn't defined. Use the equivalent one that glibc
- * has on features.h.
- */
-#include <features.h>
-#ifndef HAVE_LONG_LONG
-#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
-#endif
 #include <slang.h>
+#include "libslang.h"
+#include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <stdlib.h>
@@ -19,17 +9,9 @@
 #include "helpline.h"
 #include "../color.h"
 #include "../util.h"
+#include <stdio.h>
 
-#if SLANG_VERSION < 20104
-#define sltt_set_color(obj, name, fg, bg) \
-       SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
-#else
-#define sltt_set_color SLtt_set_color
-#endif
-
-newtComponent newt_form__new(void);
-
-int ui_browser__percent_color(double percent, bool current)
+static int ui_browser__percent_color(double percent, bool current)
 {
        if (current)
                return HE_COLORSET_SELECTED;
@@ -40,6 +22,23 @@ int ui_browser__percent_color(double percent, bool current)
        return HE_COLORSET_NORMAL;
 }
 
+void ui_browser__set_color(struct ui_browser *self __used, int color)
+{
+       SLsmg_set_color(color);
+}
+
+void ui_browser__set_percent_color(struct ui_browser *self,
+                                  double percent, bool current)
+{
+        int color = ui_browser__percent_color(percent, current);
+        ui_browser__set_color(self, color);
+}
+
+void ui_browser__gotorc(struct ui_browser *self, int y, int x)
+{
+       SLsmg_gotorc(self->y + y, self->x + x);
+}
+
 void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
 {
        struct list_head *head = self->entries;
@@ -111,7 +110,7 @@ unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
        nd = self->top;
 
        while (nd != NULL) {
-               SLsmg_gotorc(self->y + row, self->x);
+               ui_browser__gotorc(self, row, 0);
                self->write(self, nd, row);
                if (++row == self->height)
                        break;
@@ -131,13 +130,10 @@ void ui_browser__refresh_dimensions(struct ui_browser *self)
        int cols, rows;
        newtGetScreenSize(&cols, &rows);
 
-       if (self->width > cols - 4)
-               self->width = cols - 4;
-       self->height = rows - 5;
-       if (self->height > self->nr_entries)
-               self->height = self->nr_entries;
-       self->y  = (rows - self->height) / 2;
-       self->x = (cols - self->width) / 2;
+       self->width = cols - 1;
+       self->height = rows - 2;
+       self->y = 1;
+       self->x = 0;
 }
 
 void ui_browser__reset_index(struct ui_browser *self)
@@ -146,34 +142,48 @@ void ui_browser__reset_index(struct ui_browser *self)
        self->seek(self, 0, SEEK_SET);
 }
 
+void ui_browser__add_exit_key(struct ui_browser *self, int key)
+{
+       newtFormAddHotKey(self->form, key);
+}
+
+void ui_browser__add_exit_keys(struct ui_browser *self, int keys[])
+{
+       int i = 0;
+
+       while (keys[i] && i < 64) {
+               ui_browser__add_exit_key(self, keys[i]);
+               ++i;
+       }
+}
+
 int ui_browser__show(struct ui_browser *self, const char *title,
                     const char *helpline, ...)
 {
        va_list ap;
+       int keys[] = { NEWT_KEY_UP, NEWT_KEY_DOWN, NEWT_KEY_PGUP,
+                      NEWT_KEY_PGDN, NEWT_KEY_HOME, NEWT_KEY_END, ' ',
+                      NEWT_KEY_LEFT, NEWT_KEY_ESCAPE, 'q', CTRL('c'), 0 };
 
-       if (self->form != NULL) {
+       if (self->form != NULL)
                newtFormDestroy(self->form);
-               newtPopWindow();
-       }
+
        ui_browser__refresh_dimensions(self);
-       newtCenteredWindow(self->width, self->height, title);
-       self->form = newt_form__new();
+       self->form = newtForm(NULL, NULL, 0);
        if (self->form == NULL)
                return -1;
 
-       self->sb = newtVerticalScrollbar(self->width, 0, self->height,
+       self->sb = newtVerticalScrollbar(self->width, 1, self->height,
                                         HE_COLORSET_NORMAL,
                                         HE_COLORSET_SELECTED);
        if (self->sb == NULL)
                return -1;
 
-       newtFormAddHotKey(self->form, NEWT_KEY_UP);
-       newtFormAddHotKey(self->form, NEWT_KEY_DOWN);
-       newtFormAddHotKey(self->form, NEWT_KEY_PGUP);
-       newtFormAddHotKey(self->form, NEWT_KEY_PGDN);
-       newtFormAddHotKey(self->form, NEWT_KEY_HOME);
-       newtFormAddHotKey(self->form, NEWT_KEY_END);
-       newtFormAddHotKey(self->form, ' ');
+       SLsmg_gotorc(0, 0);
+       ui_browser__set_color(self, NEWT_COLORSET_ROOT);
+       slsmg_write_nstring(title, self->width);
+
+       ui_browser__add_exit_keys(self, keys);
        newtFormAddComponent(self->form, self->sb);
 
        va_start(ap, helpline);
@@ -185,7 +195,6 @@ int ui_browser__show(struct ui_browser *self, const char *title,
 void ui_browser__hide(struct ui_browser *self)
 {
        newtFormDestroy(self->form);
-       newtPopWindow();
        self->form = NULL;
        ui_helpline__pop();
 }
@@ -196,28 +205,28 @@ int ui_browser__refresh(struct ui_browser *self)
 
        newtScrollbarSet(self->sb, self->index, self->nr_entries - 1);
        row = self->refresh(self);
-       SLsmg_set_color(HE_COLORSET_NORMAL);
+       ui_browser__set_color(self, HE_COLORSET_NORMAL);
        SLsmg_fill_region(self->y + row, self->x,
                          self->height - row, self->width, ' ');
 
        return 0;
 }
 
-int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es)
+int ui_browser__run(struct ui_browser *self)
 {
+       struct newtExitStruct es;
+
        if (ui_browser__refresh(self) < 0)
                return -1;
 
        while (1) {
                off_t offset;
 
-               newtFormRun(self->form, es);
+               newtFormRun(self->form, &es);
 
-               if (es->reason != NEWT_EXIT_HOTKEY)
+               if (es.reason != NEWT_EXIT_HOTKEY)
                        break;
-               if (is_exit_key(es->u.key))
-                       return es->u.key;
-               switch (es->u.key) {
+               switch (es.u.key) {
                case NEWT_KEY_DOWN:
                        if (self->index == self->nr_entries - 1)
                                break;
@@ -274,12 +283,12 @@ int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es)
                        self->seek(self, -offset, SEEK_END);
                        break;
                default:
-                       return es->u.key;
+                       return es.u.key;
                }
                if (ui_browser__refresh(self) < 0)
                        return -1;
        }
-       return 0;
+       return -1;
 }
 
 unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
@@ -294,7 +303,7 @@ unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
        pos = self->top;
 
        list_for_each_from(pos, head) {
-               SLsmg_gotorc(self->y + row, self->x);
+               ui_browser__gotorc(self, row, 0);
                self->write(self, pos, row);
                if (++row == self->height)
                        break;
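
With the newtExitStruct plumbing removed, ui_browser__run() now blocks until
one of the registered exit keys is pressed and returns that key (or -1 on
error), so every browser drives its own key loop. A caller-side sketch; the
function name and the '/' handling are illustrative placeholders, while the
ui_browser__* calls are the ones changed above:

	static int my_browser__run(struct ui_browser *b)
	{
		int extra_keys[] = { '/', NEWT_KEY_RIGHT, 0 };	/* 0-terminated */
		int key;

		if (ui_browser__show(b, "title", "Press <- or ESC to exit") < 0)
			return -1;

		/* on top of the defaults registered by ui_browser__show() */
		ui_browser__add_exit_keys(b, extra_keys);

		while (1) {
			key = ui_browser__run(b);

			if (key == '/')		/* handle it, then resume browsing */
				continue;
			break;			/* any other exit key ends the loop */
		}

		ui_browser__hide(b);
		return key;
	}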
index 0b9f829214f756ec16227745835cfea55d7ee503..0dc7e4da36f52c42ef3574dc89dce7102ae8438d 100644 (file)
@@ -25,16 +25,21 @@ struct ui_browser {
 };
 
 
-int ui_browser__percent_color(double percent, bool current);
+void ui_browser__set_color(struct ui_browser *self, int color);
+void ui_browser__set_percent_color(struct ui_browser *self,
+                                  double percent, bool current);
 bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row);
 void ui_browser__refresh_dimensions(struct ui_browser *self);
 void ui_browser__reset_index(struct ui_browser *self);
 
+void ui_browser__gotorc(struct ui_browser *self, int y, int x);
+void ui_browser__add_exit_key(struct ui_browser *self, int key);
+void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]);
 int ui_browser__show(struct ui_browser *self, const char *title,
                     const char *helpline, ...);
 void ui_browser__hide(struct ui_browser *self);
 int ui_browser__refresh(struct ui_browser *self);
-int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es);
+int ui_browser__run(struct ui_browser *self);
 
 void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence);
 unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self);
index a90273e63f4fb6939ea64e074513e1afabb1f289..82b78f99251bb2b764165cf8066a85f1e6e4b97d 100644 (file)
@@ -40,14 +40,12 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
 
        if (ol->offset != -1) {
                struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
-               int color = ui_browser__percent_color(olrb->percent, current_entry);
-               SLsmg_set_color(color);
+               ui_browser__set_percent_color(self, olrb->percent, current_entry);
                slsmg_printf(" %7.2f ", olrb->percent);
                if (!current_entry)
-                       SLsmg_set_color(HE_COLORSET_CODE);
+                       ui_browser__set_color(self, HE_COLORSET_CODE);
        } else {
-               int color = ui_browser__percent_color(0, current_entry);
-               SLsmg_set_color(color);
+               ui_browser__set_percent_color(self, 0, current_entry);
                slsmg_write_nstring(" ", 9);
        }
 
@@ -135,32 +133,31 @@ static void annotate_browser__set_top(struct annotate_browser *self,
        self->curr_hot = nd;
 }
 
-static int annotate_browser__run(struct annotate_browser *self,
-                                struct newtExitStruct *es)
+static int annotate_browser__run(struct annotate_browser *self)
 {
        struct rb_node *nd;
        struct hist_entry *he = self->b.priv;
+       int key;
 
        if (ui_browser__show(&self->b, he->ms.sym->name,
-                            "<- or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
+                            "<-, -> or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
                return -1;
-
-       newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
-       newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
+       /*
+        * To allow builtin-annotate to cycle thru multiple symbols by
+        * examining the exit key for this function.
+        */
+       ui_browser__add_exit_key(&self->b, NEWT_KEY_RIGHT);
 
        nd = self->curr_hot;
        if (nd) {
-               newtFormAddHotKey(self->b.form, NEWT_KEY_TAB);
-               newtFormAddHotKey(self->b.form, NEWT_KEY_UNTAB);
+               int tabs[] = { NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0 };
+               ui_browser__add_exit_keys(&self->b, tabs);
        }
 
        while (1) {
-               ui_browser__run(&self->b, es);
-
-               if (es->reason != NEWT_EXIT_HOTKEY)
-                       break;
+               key = ui_browser__run(&self->b);
 
-               switch (es->u.key) {
+               switch (key) {
                case NEWT_KEY_TAB:
                        nd = rb_prev(nd);
                        if (nd == NULL)
@@ -179,12 +176,11 @@ static int annotate_browser__run(struct annotate_browser *self,
        }
 out:
        ui_browser__hide(&self->b);
-       return es->u.key;
+       return key;
 }
 
 int hist_entry__tui_annotate(struct hist_entry *self)
 {
-       struct newtExitStruct es;
        struct objdump_line *pos, *n;
        struct objdump_line_rb_node *rbpos;
        LIST_HEAD(head);
@@ -232,7 +228,7 @@ int hist_entry__tui_annotate(struct hist_entry *self)
                annotate_browser__set_top(&browser, browser.curr_hot);
 
        browser.b.width += 18; /* Percentage */
-       ret = annotate_browser__run(&browser, &es);
+       ret = annotate_browser__run(&browser);
        list_for_each_entry_safe(pos, n, &head, node) {
                list_del(&pos->node);
                objdump_line__free(pos);
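
Returning the exit key from annotate_browser__run() (and registering
NEWT_KEY_RIGHT as an exit key) is what allows a caller to cycle through
symbols, as the comment in the hunk above notes. A hedged sketch of such a
loop, assuming hist_entry__tui_annotate() propagates that key to its caller
(not shown in this hunk) and with a made-up symbol-picking helper:

	int key;

	do {
		key = hist_entry__tui_annotate(he);
		if (key == NEWT_KEY_RIGHT)
			he = next_annotatable_entry(he);	/* hypothetical helper */
	} while (key == NEWT_KEY_RIGHT && he != NULL);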
index dafdf6775d77f44d69abf1980b1a9cfe4ab053dc..ebda8c3fde9e6468ddbd84fc2df7e324ba862854 100644 (file)
@@ -58,6 +58,11 @@ static char callchain_list__folded(const struct callchain_list *self)
        return map_symbol__folded(&self->ms);
 }
 
+static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
+{
+       self->unfolded = unfold ? self->has_children : false;
+}
+
 static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
 {
        int n = 0;
@@ -129,16 +134,16 @@ static void callchain_node__init_have_children_rb_tree(struct callchain_node *se
        for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
                struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
                struct callchain_list *chain;
-               int first = true;
+               bool first = true;
 
                list_for_each_entry(chain, &child->val, list) {
                        if (first) {
                                first = false;
                                chain->ms.has_children = chain->list.next != &child->val ||
-                                                        rb_first(&child->rb_root) != NULL;
+                                                        !RB_EMPTY_ROOT(&child->rb_root);
                        } else
                                chain->ms.has_children = chain->list.next == &child->val &&
-                                                        rb_first(&child->rb_root) != NULL;
+                                                        !RB_EMPTY_ROOT(&child->rb_root);
                }
 
                callchain_node__init_have_children_rb_tree(child);
@@ -150,7 +155,7 @@ static void callchain_node__init_have_children(struct callchain_node *self)
        struct callchain_list *chain;
 
        list_for_each_entry(chain, &self->val, list)
-               chain->ms.has_children = rb_first(&self->rb_root) != NULL;
+               chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
 
        callchain_node__init_have_children_rb_tree(self);
 }
@@ -168,6 +173,7 @@ static void callchain__init_have_children(struct rb_root *self)
 static void hist_entry__init_have_children(struct hist_entry *self)
 {
        if (!self->init_have_children) {
+               self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
                callchain__init_have_children(&self->sorted_chain);
                self->init_have_children = true;
        }
@@ -195,43 +201,114 @@ static bool hist_browser__toggle_fold(struct hist_browser *self)
        return false;
 }
 
-static int hist_browser__run(struct hist_browser *self, const char *title,
-                            struct newtExitStruct *es)
+static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
+{
+       int n = 0;
+       struct rb_node *nd;
+
+       for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+               struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
+               struct callchain_list *chain;
+               bool has_children = false;
+
+               list_for_each_entry(chain, &child->val, list) {
+                       ++n;
+                       map_symbol__set_folding(&chain->ms, unfold);
+                       has_children = chain->ms.has_children;
+               }
+
+               if (has_children)
+                       n += callchain_node__set_folding_rb_tree(child, unfold);
+       }
+
+       return n;
+}
+
+static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
+{
+       struct callchain_list *chain;
+       bool has_children = false;
+       int n = 0;
+
+       list_for_each_entry(chain, &node->val, list) {
+               ++n;
+               map_symbol__set_folding(&chain->ms, unfold);
+               has_children = chain->ms.has_children;
+       }
+
+       if (has_children)
+               n += callchain_node__set_folding_rb_tree(node, unfold);
+
+       return n;
+}
+
+static int callchain__set_folding(struct rb_root *chain, bool unfold)
+{
+       struct rb_node *nd;
+       int n = 0;
+
+       for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+               struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+               n += callchain_node__set_folding(node, unfold);
+       }
+
+       return n;
+}
+
+static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
+{
+       hist_entry__init_have_children(self);
+       map_symbol__set_folding(&self->ms, unfold);
+
+       if (self->ms.has_children) {
+               int n = callchain__set_folding(&self->sorted_chain, unfold);
+               self->nr_rows = unfold ? n : 0;
+       } else
+               self->nr_rows = 0;
+}
+
+static void hists__set_folding(struct hists *self, bool unfold)
+{
+       struct rb_node *nd;
+
+       self->nr_entries = 0;
+
+       for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+               struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
+               hist_entry__set_folding(he, unfold);
+               self->nr_entries += 1 + he->nr_rows;
+       }
+}
+
+static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
+{
+       hists__set_folding(self->hists, unfold);
+       self->b.nr_entries = self->hists->nr_entries;
+       /* Go to the start, we may be way after valid entries after a collapse */
+       ui_browser__reset_index(&self->b);
+}
+
+static int hist_browser__run(struct hist_browser *self, const char *title)
 {
-       char str[256], unit;
-       unsigned long nr_events = self->hists->stats.nr_events[PERF_RECORD_SAMPLE];
+       int key;
+       int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't',
+                           NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, 0, };
 
        self->b.entries = &self->hists->entries;
        self->b.nr_entries = self->hists->nr_entries;
 
        hist_browser__refresh_dimensions(self);
 
-       nr_events = convert_unit(nr_events, &unit);
-       snprintf(str, sizeof(str), "Events: %lu%c                            ",
-                nr_events, unit);
-       newtDrawRootText(0, 0, str);
-
        if (ui_browser__show(&self->b, title,
                             "Press '?' for help on key bindings") < 0)
                return -1;
 
-       newtFormAddHotKey(self->b.form, 'a');
-       newtFormAddHotKey(self->b.form, '?');
-       newtFormAddHotKey(self->b.form, 'h');
-       newtFormAddHotKey(self->b.form, 'd');
-       newtFormAddHotKey(self->b.form, 'D');
-       newtFormAddHotKey(self->b.form, 't');
-
-       newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
-       newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
-       newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
+       ui_browser__add_exit_keys(&self->b, exit_keys);
 
        while (1) {
-               ui_browser__run(&self->b, es);
+               key = ui_browser__run(&self->b);
 
-               if (es->reason != NEWT_EXIT_HOTKEY)
-                       break;
-               switch (es->u.key) {
+               switch (key) {
                case 'D': { /* Debug */
                        static int seq;
                        struct hist_entry *h = rb_entry(self->b.top,
@@ -245,18 +322,26 @@ static int hist_browser__run(struct hist_browser *self, const char *title,
                                           self->b.top_idx,
                                           h->row_offset, h->nr_rows);
                }
-                       continue;
+                       break;
+               case 'C':
+                       /* Collapse the whole world. */
+                       hist_browser__set_folding(self, false);
+                       break;
+               case 'E':
+                       /* Expand the whole world. */
+                       hist_browser__set_folding(self, true);
+                       break;
                case NEWT_KEY_ENTER:
                        if (hist_browser__toggle_fold(self))
                                break;
                        /* fall thru */
                default:
-                       return 0;
+                       goto out;
                }
        }
-
+out:
        ui_browser__hide(&self->b);
-       return 0;
+       return key;
 }
 
 static char *callchain_list__sym_name(struct callchain_list *self,
@@ -306,15 +391,10 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
                        int color;
                        bool was_first = first;
 
-                       if (first) {
+                       if (first)
                                first = false;
-                               chain->ms.has_children = chain->list.next != &child->val ||
-                                                        rb_first(&child->rb_root) != NULL;
-                       } else {
+                       else
                                extra_offset = LEVEL_OFFSET_STEP;
-                               chain->ms.has_children = chain->list.next == &child->val &&
-                                                        rb_first(&child->rb_root) != NULL;
-                       }
 
                        folded_sign = callchain_list__folded(chain);
                        if (*row_offset != 0) {
@@ -341,8 +421,8 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
                                *is_current_entry = true;
                        }
 
-                       SLsmg_set_color(color);
-                       SLsmg_gotorc(self->b.y + row, self->b.x);
+                       ui_browser__set_color(&self->b, color);
+                       ui_browser__gotorc(&self->b, row, 0);
                        slsmg_write_nstring(" ", offset + extra_offset);
                        slsmg_printf("%c ", folded_sign);
                        slsmg_write_nstring(str, width);
@@ -384,12 +464,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
        list_for_each_entry(chain, &node->val, list) {
                char ipstr[BITS_PER_LONG / 4 + 1], *s;
                int color;
-               /*
-                * FIXME: This should be moved to somewhere else,
-                * probably when the callchain is created, so as not to
-                * traverse it all over again
-                */
-               chain->ms.has_children = rb_first(&node->rb_root) != NULL;
+
                folded_sign = callchain_list__folded(chain);
 
                if (*row_offset != 0) {
@@ -405,8 +480,8 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
                }
 
                s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
-               SLsmg_gotorc(self->b.y + row, self->b.x);
-               SLsmg_set_color(color);
+               ui_browser__gotorc(&self->b, row, 0);
+               ui_browser__set_color(&self->b, color);
                slsmg_write_nstring(" ", offset);
                slsmg_printf("%c ", folded_sign);
                slsmg_write_nstring(s, width - 2);
@@ -465,7 +540,7 @@ static int hist_browser__show_entry(struct hist_browser *self,
        }
 
        if (symbol_conf.use_callchain) {
-               entry->ms.has_children = !RB_EMPTY_ROOT(&entry->sorted_chain);
+               hist_entry__init_have_children(entry);
                folded_sign = hist_entry__folded(entry);
        }
 
@@ -484,8 +559,8 @@ static int hist_browser__show_entry(struct hist_browser *self,
                                color = HE_COLORSET_NORMAL;
                }
 
-               SLsmg_set_color(color);
-               SLsmg_gotorc(self->b.y + row, self->b.x);
+               ui_browser__set_color(&self->b, color);
+               ui_browser__gotorc(&self->b, row, 0);
                if (symbol_conf.use_callchain) {
                        slsmg_printf("%c ", folded_sign);
                        width -= 2;
@@ -687,8 +762,6 @@ static struct hist_browser *hist_browser__new(struct hists *hists)
 
 static void hist_browser__delete(struct hist_browser *self)
 {
-       newtFormDestroy(self->b.form);
-       newtPopWindow();
        free(self);
 }
 
@@ -702,21 +775,26 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *self)
        return self->he_selection->thread;
 }
 
-static int hist_browser__title(char *bf, size_t size, const char *ev_name,
-                              const struct dso *dso, const struct thread *thread)
+static int hists__browser_title(struct hists *self, char *bf, size_t size,
+                               const char *ev_name, const struct dso *dso,
+                               const struct thread *thread)
 {
-       int printed = 0;
+       char unit;
+       int printed;
+       unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE];
+
+       nr_events = convert_unit(nr_events, &unit);
+       printed = snprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name);
 
        if (thread)
                printed += snprintf(bf + printed, size - printed,
-                                   "Thread: %s(%d)",
-                                   (thread->comm_set ?  thread->comm : ""),
+                                   "Thread: %s(%d)",
+                                   (thread->comm_set ? thread->comm : ""),
                                    thread->pid);
        if (dso)
                printed += snprintf(bf + printed, size - printed,
-                                   "%sDSO: %s", thread ? " " : "",
-                                   dso->short_name);
-       return printed ?: snprintf(bf, size, "Event: %s", ev_name);
+                                   ", DSO: %s", dso->short_name);
+       return printed;
 }
 
 int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
@@ -725,7 +803,6 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
        struct pstack *fstack;
        const struct thread *thread_filter = NULL;
        const struct dso *dso_filter = NULL;
-       struct newtExitStruct es;
        char msg[160];
        int key = -1;
 
@@ -738,9 +815,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
 
        ui_helpline__push(helpline);
 
-       hist_browser__title(msg, sizeof(msg), ev_name,
-                           dso_filter, thread_filter);
-
+       hists__browser_title(self, msg, sizeof(msg), ev_name,
+                            dso_filter, thread_filter);
        while (1) {
                const struct thread *thread;
                const struct dso *dso;
@@ -749,70 +825,63 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
                    annotate = -2, zoom_dso = -2, zoom_thread = -2,
                    browse_map = -2;
 
-               if (hist_browser__run(browser, msg, &es))
-                       break;
+               key = hist_browser__run(browser, msg);
 
                thread = hist_browser__selected_thread(browser);
                dso = browser->selection->map ? browser->selection->map->dso : NULL;
 
-               if (es.reason == NEWT_EXIT_HOTKEY) {
-                       key = es.u.key;
-
-                       switch (key) {
-                       case NEWT_KEY_F1:
-                               goto do_help;
-                       case NEWT_KEY_TAB:
-                       case NEWT_KEY_UNTAB:
-                               /*
-                                * Exit the browser, let hists__browser_tree
-                                * go to the next or previous
-                                */
-                               goto out_free_stack;
-                       default:;
-                       }
-
-                       switch (key) {
-                       case 'a':
-                               if (browser->selection->map == NULL &&
-                                   browser->selection->map->dso->annotate_warned)
-                                       continue;
-                               goto do_annotate;
-                       case 'd':
-                               goto zoom_dso;
-                       case 't':
-                               goto zoom_thread;
-                       case 'h':
-                       case '?':
-do_help:
-                               ui__help_window("->        Zoom into DSO/Threads & Annotate current symbol\n"
-                                               "<-        Zoom out\n"
-                                               "a         Annotate current symbol\n"
-                                               "h/?/F1    Show this window\n"
-                                               "d         Zoom into current DSO\n"
-                                               "t         Zoom into current Thread\n"
-                                               "q/CTRL+C  Exit browser");
+               switch (key) {
+               case NEWT_KEY_TAB:
+               case NEWT_KEY_UNTAB:
+                       /*
+                        * Exit the browser, let hists__browser_tree
+                        * go to the next or previous
+                        */
+                       goto out_free_stack;
+               case 'a':
+                       if (browser->selection->map == NULL &&
+                           browser->selection->map->dso->annotate_warned)
                                continue;
-                       default:;
-                       }
-                       if (is_exit_key(key)) {
-                               if (key == NEWT_KEY_ESCAPE &&
-                                   !ui__dialog_yesno("Do you really want to exit?"))
-                                       continue;
-                               break;
-                       }
-
-                       if (es.u.key == NEWT_KEY_LEFT) {
-                               const void *top;
+                       goto do_annotate;
+               case 'd':
+                       goto zoom_dso;
+               case 't':
+                       goto zoom_thread;
+               case NEWT_KEY_F1:
+               case 'h':
+               case '?':
+                       ui__help_window("->        Zoom into DSO/Threads & Annotate current symbol\n"
+                                       "<-        Zoom out\n"
+                                       "a         Annotate current symbol\n"
+                                       "h/?/F1    Show this window\n"
+                                       "C         Collapse all callchains\n"
+                                       "E         Expand all callchains\n"
+                                       "d         Zoom into current DSO\n"
+                                       "t         Zoom into current Thread\n"
+                                       "q/CTRL+C  Exit browser");
+                       continue;
+               case NEWT_KEY_ENTER:
+               case NEWT_KEY_RIGHT:
+                       /* menu */
+                       break;
+               case NEWT_KEY_LEFT: {
+                       const void *top;
 
-                               if (pstack__empty(fstack))
-                                       continue;
-                               top = pstack__pop(fstack);
-                               if (top == &dso_filter)
-                                       goto zoom_out_dso;
-                               if (top == &thread_filter)
-                                       goto zoom_out_thread;
+                       if (pstack__empty(fstack))
                                continue;
-                       }
+                       top = pstack__pop(fstack);
+                       if (top == &dso_filter)
+                               goto zoom_out_dso;
+                       if (top == &thread_filter)
+                               goto zoom_out_thread;
+                       continue;
+               }
+               case NEWT_KEY_ESCAPE:
+                       if (!ui__dialog_yesno("Do you really want to exit?"))
+                               continue;
+                       /* Fall thru */
+               default:
+                       goto out_free_stack;
                }
 
                if (browser->selection->sym != NULL &&
@@ -885,8 +954,8 @@ zoom_out_dso:
                                pstack__push(fstack, &dso_filter);
                        }
                        hists__filter_by_dso(self, dso_filter);
-                       hist_browser__title(msg, sizeof(msg), ev_name,
-                                           dso_filter, thread_filter);
+                       hists__browser_title(self, msg, sizeof(msg), ev_name,
+                                            dso_filter, thread_filter);
                        hist_browser__reset(browser);
                } else if (choice == zoom_thread) {
 zoom_thread:
@@ -903,8 +972,8 @@ zoom_out_thread:
                                pstack__push(fstack, &thread_filter);
                        }
                        hists__filter_by_thread(self, thread_filter);
-                       hist_browser__title(msg, sizeof(msg), ev_name,
-                                           dso_filter, thread_filter);
+                       hists__browser_title(self, msg, sizeof(msg), ev_name,
+                                            dso_filter, thread_filter);
                        hist_browser__reset(browser);
                }
        }
@@ -925,10 +994,6 @@ int hists__tui_browse_tree(struct rb_root *self, const char *help)
                const char *ev_name = __event_name(hists->type, hists->config);
 
                key = hists__browse(hists, help, ev_name);
-
-               if (is_exit_key(key))
-                       break;
-
                switch (key) {
                case NEWT_KEY_TAB:
                        next = rb_next(nd);
@@ -940,7 +1005,7 @@ int hists__tui_browse_tree(struct rb_root *self, const char *help)
                                continue;
                        nd = rb_prev(nd);
                default:
-                       break;
+                       return key;
                }
        }
 
index 142b825b42bf41d90ee013f51aaac5e79df44f5e..e35437dfa5b48aea8a0fb0237b2bf7ed7aaf90cd 100644 (file)
@@ -1,6 +1,5 @@
 #include "../libslang.h"
 #include <elf.h>
-#include <newt.h>
 #include <sys/ttydefaults.h>
 #include <ctype.h>
 #include <string.h>
@@ -47,7 +46,6 @@ out_free_form:
 struct map_browser {
        struct ui_browser b;
        struct map        *map;
-       u16               namelen;
        u8                addrlen;
 };
 
@@ -56,14 +54,16 @@ static void map_browser__write(struct ui_browser *self, void *nd, int row)
        struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
        struct map_browser *mb = container_of(self, struct map_browser, b);
        bool current_entry = ui_browser__is_current_entry(self, row);
-       int color = ui_browser__percent_color(0, current_entry);
+       int width;
 
-       SLsmg_set_color(color);
+       ui_browser__set_percent_color(self, 0, current_entry);
        slsmg_printf("%*llx %*llx %c ",
                     mb->addrlen, sym->start, mb->addrlen, sym->end,
                     sym->binding == STB_GLOBAL ? 'g' :
                     sym->binding == STB_LOCAL  ? 'l' : 'w');
-       slsmg_write_nstring(sym->name, mb->namelen);
+       width = self->width - ((mb->addrlen * 2) + 4);
+       if (width > 0)
+               slsmg_write_nstring(sym->name, width);
 }
 
 /* FIXME uber-kludgy, see comment on cmd_report... */
@@ -98,31 +98,29 @@ static int map_browser__search(struct map_browser *self)
        return 0;
 }
 
-static int map_browser__run(struct map_browser *self, struct newtExitStruct *es)
+static int map_browser__run(struct map_browser *self)
 {
+       int key;
+
        if (ui_browser__show(&self->b, self->map->dso->long_name,
                             "Press <- or ESC to exit, %s / to search",
                             verbose ? "" : "restart with -v to use") < 0)
                return -1;
 
-       newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
-       newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
        if (verbose)
-               newtFormAddHotKey(self->b.form, '/');
+               ui_browser__add_exit_key(&self->b, '/');
 
        while (1) {
-               ui_browser__run(&self->b, es);
+               key = ui_browser__run(&self->b);
 
-               if (es->reason != NEWT_EXIT_HOTKEY)
-                       break;
-               if (verbose && es->u.key == '/')
+               if (verbose && key == '/')
                        map_browser__search(self);
                else
                        break;
        }
 
        ui_browser__hide(&self->b);
-       return 0;
+       return key;
 }
 
 int map__browse(struct map *self)
@@ -136,7 +134,6 @@ int map__browse(struct map *self)
                },
                .map = self,
        };
-       struct newtExitStruct es;
        struct rb_node *nd;
        char tmp[BITS_PER_LONG / 4];
        u64 maxaddr = 0;
@@ -144,8 +141,6 @@ int map__browse(struct map *self)
        for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
                struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
 
-               if (mb.namelen < pos->namelen)
-                       mb.namelen = pos->namelen;
                if (maxaddr < pos->end)
                        maxaddr = pos->end;
                if (verbose) {
@@ -156,6 +151,5 @@ int map__browse(struct map *self)
        }
 
        mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
-       mb.b.width += mb.addrlen * 2 + 4 + mb.namelen;
-       return map_browser__run(&mb, &es);
+       return map_browser__run(&mb);
 }
index 04600e26ceea21d08b701570494c5e72210aaa9e..9706d9d40279859321412b270c8ac3053141f4f9 100644 (file)
@@ -11,8 +11,6 @@
 #include "helpline.h"
 #include "util.h"
 
-newtComponent newt_form__new(void);
-
 static void newt_form__set_exit_keys(newtComponent self)
 {
        newtFormAddHotKey(self, NEWT_KEY_LEFT);
@@ -22,7 +20,7 @@ static void newt_form__set_exit_keys(newtComponent self)
        newtFormAddHotKey(self, CTRL('c'));
 }
 
-newtComponent newt_form__new(void)
+static newtComponent newt_form__new(void)
 {
        newtComponent self = newtForm(NULL, NULL, 0);
        if (self)
index f380fed74359034a843756256d6e7a79b0ff22b5..7562707ddd1c491755dc8ea5121637918ba1b844 100644 (file)
@@ -266,19 +266,6 @@ bool strglobmatch(const char *str, const char *pat);
 bool strlazymatch(const char *str, const char *pat);
 unsigned long convert_unit(unsigned long value, char *unit);
 
-#ifndef ESC
-#define ESC 27
-#endif
-
-static inline bool is_exit_key(int key)
-{
-       char up;
-       if (key == CTRL('c') || key == ESC)
-               return true;
-       up = toupper(key);
-       return up == 'Q';
-}
-
 #define _STR(x) #x
 #define STR(x) _STR(x)
 
index 66cf65b510b11c1c245db3b6a435e040ac3606fc..c1f1e3c6298462f8ed4d672257dac3b4da081b33 100644 (file)
@@ -218,7 +218,6 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
        events = file->f_op->poll(file, &irqfd->pt);
 
        list_add_tail(&irqfd->list, &kvm->irqfds.items);
-       spin_unlock_irq(&kvm->irqfds.lock);
 
        /*
         * Check if there was an event already pending on the eventfd
@@ -227,6 +226,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
        if (events & POLLIN)
                schedule_work(&irqfd->inject);
 
+       spin_unlock_irq(&kvm->irqfds.lock);
+
        /*
         * do not drop the file until the irqfd is fully initialized, otherwise
         * we might race against the POLLHUP
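
Moving the unlock down keeps the pending-event check and the schedule_work()
call in the same critical section that publishes the irqfd on
kvm->irqfds.items; the apparent intent (an assumption, since the changelog is
not part of this diff) is to prevent a concurrent deassign from tearing the
irqfd down between the list insertion and the injection being scheduled. The
resulting ordering, as a comment-only sketch:

	/*
	 * spin_lock_irq(&kvm->irqfds.lock);
	 *   list_add_tail(&irqfd->list, &kvm->irqfds.items);	// publish
	 *   if (events & POLLIN)
	 *           schedule_work(&irqfd->inject);		// still under the lock
	 * spin_unlock_irq(&kvm->irqfds.lock);
	 */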
index d4853a54771a04a2bb3e126b1aa54ec13e5c29d0..5186e728c53ed7e27ba6d32df31d37f7aa140ebc 100644 (file)
@@ -1970,10 +1970,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 
 asmlinkage void kvm_handle_fault_on_reboot(void)
 {
-       if (kvm_rebooting)
+       if (kvm_rebooting) {
                /* spin while reset goes on */
+               local_irq_enable();
                while (true)
                        ;
+       }
        /* Fault while not rebooting.  We want the trace. */
        BUG();
 }