def_bool y
depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
+menuconfig PROCESSOR_SELECT
+ default y
+ bool "Supported processor vendors" if EMBEDDED
+ help
+ This lets you choose what x86 vendor support code your kernel
+ will include.
+
+config CPU_SUP_INTEL_32
+ default y
+ bool "Support Intel processors" if PROCESSOR_SELECT
+ depends on !64BIT
+ help
+ This enables extended support for Intel processors
+
+config CPU_SUP_INTEL_64
+ default y
+ bool "Support Intel processors" if PROCESSOR_SELECT
+ depends on 64BIT
+ help
+ This enables extended support for Intel processors
+
+config CPU_SUP_CYRIX_32
+ default y
+ bool "Support Cyrix processors" if PROCESSOR_SELECT
+ depends on !64BIT
+ help
+ This enables extended support for Cyrix processors
+
+config CPU_SUP_AMD
+ default y
+ bool "Support AMD processors" if PROCESSOR_SELECT
+ help
+ This enables extended support for AMD processors
+
+config CPU_SUP_CENTAUR_32
+ default y
+ bool "Support Centaur processors" if PROCESSOR_SELECT
+ depends on !64BIT
+ help
+ This enables extended support for Centaur processors
+
+config CPU_SUP_CENTAUR_64
+ default y
+ bool "Support Centaur processors" if PROCESSOR_SELECT
+ depends on 64BIT
+ help
+ This enables extended support for Centaur processors
+
+config CPU_SUP_TRANSMETA_32
+ default y
+ bool "Support Transmeta processors" if PROCESSOR_SELECT
+ depends on !64BIT
+ help
+ This enables extended support for Transmeta processors
+
+config CPU_SUP_UMC_32
+ default y
+ bool "Support UMC processors" if PROCESSOR_SELECT
+ depends on !64BIT
+ help
+ This enables extended support for UMC processors
+
+ config X86_DS
+ bool "Debug Store support"
+ default y
+ help
+ Add support for Debug Store.
+ This allows the kernel to provide a memory buffer to the hardware
+ to store various profiling and tracing events.
+
+ config X86_PTRACE_BTS
+ bool "ptrace interface to Branch Trace Store"
+ default y
+ depends on (X86_DS && X86_DEBUGCTLMSR)
+ help
+ Add a ptrace interface to allow collecting an execution trace
+ of the traced task.
+ This collects control flow changes in a (cyclic) buffer and allows
+ debuggers to fill in the gaps and show an execution trace of the debuggee.
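As a rough illustration (editorial, not part of the patch), the new PROCESSOR_SELECT menu lets an EMBEDDED configuration drop unneeded vendor support; a 64-bit, Intel-only .config fragment might look like this, with the exact symbol set depending on the rest of the configuration (X86_PTRACE_BTS additionally needs X86_DEBUGCTLMSR):

    CONFIG_EMBEDDED=y
    CONFIG_PROCESSOR_SELECT=y
    CONFIG_CPU_SUP_INTEL_64=y
    # CONFIG_CPU_SUP_AMD is not set
    # CONFIG_CPU_SUP_CENTAUR_64 is not set
    CONFIG_X86_DS=y
    CONFIG_X86_PTRACE_BTS=y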
#include <mach_apic.h>
#endif
-#ifdef CONFIG_X86_INTEL_USERCOPY
-/*
- * Alignment at which movsl is preferred for bulk memory copies.
- */
-struct movsl_mask movsl_mask __read_mostly;
-#endif
-
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
/*
* find out the number of processor cores on the die
*/
-static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
+static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
unsigned int eax, ebx, ecx, edx;
if (p)
strcpy(c->x86_model_id, p);
- c->x86_max_cores = num_cpu_cores(c);
+ detect_extended_topology(c);
- detect_ht(c);
+ if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+ /*
+ * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+ * detection.
+ */
+ c->x86_max_cores = intel_num_cpu_cores(c);
+ detect_ht(c);
+ }
/* Work around errata */
Intel_errata_workarounds(c);
if (cpu_has_xmm2)
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
- if (c->x86 == 15) {
+ if (c->x86 == 15)
set_cpu_cap(c, X86_FEATURE_P4);
- }
if (c->x86 == 6)
set_cpu_cap(c, X86_FEATURE_P3);
if (cpu_has_ds) {
set_cpu_cap(c, X86_FEATURE_BTS);
if (!(l1 & (1<<12)))
set_cpu_cap(c, X86_FEATURE_PEBS);
+ ds_init_intel(c);
}
if (cpu_has_bts)
- ds_init_intel(c);
+ ptrace_bts_init_intel(c);
/*
* See if we have a good local APIC by checking for buggy Pentia,
.c_early_init = early_init_intel,
.c_init = init_intel,
.c_size_cache = intel_size_cache,
+ .c_x86_vendor = X86_VENDOR_INTEL,
};
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
-
-#ifndef CONFIG_X86_CMPXCHG
-unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
-{
- u8 prev;
- unsigned long flags;
-
- /* Poor man's cmpxchg for 386. Unsuitable for SMP */
- local_irq_save(flags);
- prev = *(u8 *)ptr;
- if (prev == old)
- *(u8 *)ptr = new;
- local_irq_restore(flags);
- return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u8);
-
-unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
-{
- u16 prev;
- unsigned long flags;
-
- /* Poor man's cmpxchg for 386. Unsuitable for SMP */
- local_irq_save(flags);
- prev = *(u16 *)ptr;
- if (prev == old)
- *(u16 *)ptr = new;
- local_irq_restore(flags);
- return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u16);
-
-unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
-{
- u32 prev;
- unsigned long flags;
-
- /* Poor man's cmpxchg for 386. Unsuitable for SMP */
- local_irq_save(flags);
- prev = *(u32 *)ptr;
- if (prev == old)
- *(u32 *)ptr = new;
- local_irq_restore(flags);
- return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u32);
-#endif
-
-#ifndef CONFIG_X86_CMPXCHG64
-unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
-{
- u64 prev;
- unsigned long flags;
-
- /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
- local_irq_save(flags);
- prev = *(u64 *)ptr;
- if (prev == old)
- *(u64 *)ptr = new;
- local_irq_restore(flags);
- return prev;
-}
-EXPORT_SYMBOL(cmpxchg_486_u64);
-#endif
+cpu_dev_register(intel_cpu_dev);
/* arch_initcall(intel_cpu_init); */
#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>
+#include <asm/syscalls.h>
+#include <asm/smp.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
{
/* This must be done before dead CPU ack */
cpu_exit_clear();
- wbinvd();
mb();
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
* With physical CPU hotplug, we should halt the cpu
*/
local_irq_disable();
- while (1)
- halt();
+ /* mask all interrupts, flush any and all caches, and halt */
+ wbinvd_halt();
}
#else
static inline void play_dead(void)
tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
put_cpu();
}
+ #ifdef CONFIG_X86_DS
+ /* Free any DS contexts that have not been properly released. */
+ if (unlikely(current->thread.ds_ctx)) {
+ /* we clear debugctl to make sure DS is not used. */
+ update_debugctlmsr(0);
+ ds_free(current->thread.ds_ctx);
+ }
+ #endif /* CONFIG_X86_DS */
}
void flush_thread(void)
return 0;
}
+ #ifdef CONFIG_X86_DS
+ static int update_debugctl(struct thread_struct *prev,
+ struct thread_struct *next, unsigned long debugctl)
+ {
+ unsigned long ds_prev = 0;
+ unsigned long ds_next = 0;
+
+ if (prev->ds_ctx)
+ ds_prev = (unsigned long)prev->ds_ctx->ds;
+ if (next->ds_ctx)
+ ds_next = (unsigned long)next->ds_ctx->ds;
+
+ if (ds_next != ds_prev) {
+ /* we clear debugctl to make sure DS
+ * is not in use when we change it */
+ debugctl = 0;
+ update_debugctlmsr(0);
+ wrmsr(MSR_IA32_DS_AREA, ds_next, 0);
+ }
+ return debugctl;
+ }
+ #else
+ static int update_debugctl(struct thread_struct *prev,
+ struct thread_struct *next, unsigned long debugctl)
+ {
+ return debugctl;
+ }
+ #endif /* CONFIG_X86_DS */
+
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss)
prev = &prev_p->thread;
next = &next_p->thread;
- debugctl = prev->debugctlmsr;
- if (next->ds_area_msr != prev->ds_area_msr) {
- /* we clear debugctl to make sure DS
- * is not in use when we change it */
- debugctl = 0;
- update_debugctlmsr(0);
- wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
- }
+ debugctl = update_debugctl(prev, next, prev->debugctlmsr);
if (next->debugctlmsr != debugctl)
update_debugctlmsr(next->debugctlmsr);
hard_enable_TSC();
}
- #ifdef X86_BTS
+ #ifdef CONFIG_X86_PTRACE_BTS
if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
- #endif
+ #endif /* CONFIG_X86_PTRACE_BTS */
if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
+#include <asm/syscalls.h>
asmlinkage extern void ret_from_fork(void);
static inline void play_dead(void)
{
idle_task_exit();
- wbinvd();
mb();
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
local_irq_disable();
- while (1)
- halt();
+ /* mask all interrupts, flush any and all caches, and halt */
+ wbinvd_halt();
}
#else
static inline void play_dead(void)
t->io_bitmap_max = 0;
put_cpu();
}
+ #ifdef CONFIG_X86_DS
+ /* Free any DS contexts that have not been properly released. */
+ if (unlikely(t->ds_ctx)) {
+ /* we clear debugctl to make sure DS is not used. */
+ update_debugctlmsr(0);
+ ds_free(t->ds_ctx);
+ }
+ #endif /* CONFIG_X86_DS */
}
void flush_thread(void)
next = &next_p->thread;
debugctl = prev->debugctlmsr;
- if (next->ds_area_msr != prev->ds_area_msr) {
- /* we clear debugctl to make sure DS
- * is not in use when we change it */
- debugctl = 0;
- update_debugctlmsr(0);
- wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
+
+ #ifdef CONFIG_X86_DS
+ {
+ unsigned long ds_prev = 0, ds_next = 0;
+
+ if (prev->ds_ctx)
+ ds_prev = (unsigned long)prev->ds_ctx->ds;
+ if (next->ds_ctx)
+ ds_next = (unsigned long)next->ds_ctx->ds;
+
+ if (ds_next != ds_prev) {
+ /*
+ * We clear debugctl to make sure DS
+ * is not in use when we change it:
+ */
+ debugctl = 0;
+ update_debugctlmsr(0);
+ wrmsrl(MSR_IA32_DS_AREA, ds_next);
+ }
}
+ #endif /* CONFIG_X86_DS */
if (next->debugctlmsr != debugctl)
update_debugctlmsr(next->debugctlmsr);
memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
}
- #ifdef X86_BTS
+ #ifdef CONFIG_X86_PTRACE_BTS
if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
- #endif
+ #endif /* CONFIG_X86_PTRACE_BTS */
}
/*
#define FLAG_MASK FLAG_MASK_32
-static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
+static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
regno >>= 2;
return 0;
}
- #ifdef X86_BTS
+ #ifdef CONFIG_X86_PTRACE_BTS
+ /*
+ * The configuration for a particular BTS hardware implementation.
+ */
+ struct bts_configuration {
+ /* the size of a BTS record in bytes; at most BTS_MAX_RECORD_SIZE */
+ unsigned char sizeof_bts;
+ /* the size of a field in the BTS record in bytes */
+ unsigned char sizeof_field;
+ /* a bitmask to enable/disable BTS in DEBUGCTL MSR */
+ unsigned long debugctl_mask;
+ };
+ static struct bts_configuration bts_cfg;
+
+ #define BTS_MAX_RECORD_SIZE (8 * 3)
+
+
+ /*
+ * Branch Trace Store (BTS) uses the following format. Different
+ * architectures vary in the size of those fields.
+ * - source linear address
+ * - destination linear address
+ * - flags
+ *
+ * Later architectures use 64bit pointers throughout, whereas earlier
+ * architectures use 32bit pointers in 32bit mode.
+ *
+ * We compute the base address for the first 8 fields based on:
+ * - the field size stored in the DS configuration
+ * - the relative field position
+ *
+ * In order to store additional information in the BTS buffer, we use
+ * a special source address to indicate that the record requires
+ * special interpretation.
+ *
+ * On Netburst, a bit in the flags field indicated whether the branch
+ * was predicted; that bit is ignored here.
+ */
+
+ enum bts_field {
+ bts_from = 0,
+ bts_to,
+ bts_flags,
+
+ bts_escape = (unsigned long)-1,
+ bts_qual = bts_to,
+ bts_jiffies = bts_flags
+ };
- static int ptrace_bts_get_size(struct task_struct *child)
+ static inline unsigned long bts_get(const char *base, enum bts_field field)
{
- if (!child->thread.ds_area_msr)
- return -ENXIO;
+ base += (bts_cfg.sizeof_field * field);
+ return *(unsigned long *)base;
+ }
+
+ static inline void bts_set(char *base, enum bts_field field, unsigned long val)
+ {
+ base += (bts_cfg.sizeof_field * field);
+ (*(unsigned long *)base) = val;
+ }
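As a concrete example (editorial, not part of the patch): given a const char *raw pointing at one raw record, the Core2 configuration defined further down (sizeof_field == 8, sizeof_bts == 24) makes the accessors above resolve to fixed byte offsets; with the 32-bit Pentium M/Netburst configurations the same fields sit at offsets 0, 4 and 8.

    /* offsets implied by bts_cfg_core2 (sizeof_field == 8) */
    unsigned long from  = bts_get(raw, bts_from);   /* bytes  0..7  */
    unsigned long to    = bts_get(raw, bts_to);     /* bytes  8..15 */
    unsigned long flags = bts_get(raw, bts_flags);  /* bytes 16..23 */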
- return ds_get_bts_index((void *)child->thread.ds_area_msr);
+ /*
+ * Translate a BTS record from the raw format into the bts_struct format
+ *
+ * out (out): bts_struct interpretation
+ * raw: raw BTS record
+ */
+ static void ptrace_bts_translate_record(struct bts_struct *out, const void *raw)
+ {
+ memset(out, 0, sizeof(*out));
+ if (bts_get(raw, bts_from) == bts_escape) {
+ out->qualifier = bts_get(raw, bts_qual);
+ out->variant.jiffies = bts_get(raw, bts_jiffies);
+ } else {
+ out->qualifier = BTS_BRANCH;
+ out->variant.lbr.from_ip = bts_get(raw, bts_from);
+ out->variant.lbr.to_ip = bts_get(raw, bts_to);
+ }
}
- static int ptrace_bts_read_record(struct task_struct *child,
- long index,
+ static int ptrace_bts_read_record(struct task_struct *child, size_t index,
struct bts_struct __user *out)
{
struct bts_struct ret;
- int retval;
- int bts_end;
- int bts_index;
+ const void *bts_record;
+ size_t bts_index, bts_end;
+ int error;
- if (!child->thread.ds_area_msr)
- return -ENXIO;
+ error = ds_get_bts_end(child, &bts_end);
+ if (error < 0)
+ return error;
- if (index < 0)
- return -EINVAL;
-
- bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
if (bts_end <= index)
return -EINVAL;
+ error = ds_get_bts_index(child, &bts_index);
+ if (error < 0)
+ return error;
+
/* translate the ptrace bts index into the ds bts index */
- bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr);
- bts_index -= (index + 1);
- if (bts_index < 0)
- bts_index += bts_end;
+ bts_index += bts_end - (index + 1);
+ if (bts_end <= bts_index)
+ bts_index -= bts_end;
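(A worked example of the index translation above, as an editorial note: with bts_end = 4 and a ds write index of 1, ptrace index 0 -- the newest record -- becomes 1 + 4 - 1 = 4, which wraps to raw index 0; ptrace index 3 -- the oldest -- becomes 1 + 4 - 4 = 1, the slot that would be overwritten next.)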
+
+ error = ds_access_bts(child, bts_index, &bts_record);
+ if (error < 0)
+ return error;
- retval = ds_read_bts((void *)child->thread.ds_area_msr,
- bts_index, &ret);
- if (retval < 0)
- return retval;
+ ptrace_bts_translate_record(&ret, bts_record);
if (copy_to_user(out, &ret, sizeof(ret)))
return -EFAULT;
return sizeof(ret);
}
- static int ptrace_bts_clear(struct task_struct *child)
- {
- if (!child->thread.ds_area_msr)
- return -ENXIO;
-
- return ds_clear((void *)child->thread.ds_area_msr);
- }
-
static int ptrace_bts_drain(struct task_struct *child,
long size,
struct bts_struct __user *out)
{
- int end, i;
- void *ds = (void *)child->thread.ds_area_msr;
-
- if (!ds)
- return -ENXIO;
+ struct bts_struct ret;
+ const unsigned char *raw;
+ size_t end, i;
+ int error;
- end = ds_get_bts_index(ds);
- if (end <= 0)
- return end;
+ error = ds_get_bts_index(child, &end);
+ if (error < 0)
+ return error;
if (size < (end * sizeof(struct bts_struct)))
return -EIO;
- for (i = 0; i < end; i++, out++) {
- struct bts_struct ret;
- int retval;
+ error = ds_access_bts(child, 0, (const void **)&raw);
+ if (error < 0)
+ return error;
- retval = ds_read_bts(ds, i, &ret);
- if (retval < 0)
- return retval;
+ for (i = 0; i < end; i++, out++, raw += bts_cfg.sizeof_bts) {
+ ptrace_bts_translate_record(&ret, raw);
if (copy_to_user(out, &ret, sizeof(ret)))
return -EFAULT;
}
- ds_clear(ds);
+ error = ds_clear_bts(child);
+ if (error < 0)
+ return error;
return end;
}
+ static void ptrace_bts_ovfl(struct task_struct *child)
+ {
+ send_sig(child->thread.bts_ovfl_signal, child, 0);
+ }
+
static int ptrace_bts_config(struct task_struct *child,
long cfg_size,
const struct ptrace_bts_config __user *ucfg)
{
struct ptrace_bts_config cfg;
- int bts_size, ret = 0;
- void *ds;
+ int error = 0;
+
+ error = -EOPNOTSUPP;
+ if (!bts_cfg.sizeof_bts)
+ goto errout;
+ error = -EIO;
if (cfg_size < sizeof(cfg))
- return -EIO;
+ goto errout;
+ error = -EFAULT;
if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
- return -EFAULT;
+ goto errout;
- if ((int)cfg.size < 0)
- return -EINVAL;
+ error = -EINVAL;
+ if ((cfg.flags & PTRACE_BTS_O_SIGNAL) &&
+ !(cfg.flags & PTRACE_BTS_O_ALLOC))
+ goto errout;
- bts_size = 0;
- ds = (void *)child->thread.ds_area_msr;
- if (ds) {
- bts_size = ds_get_bts_size(ds);
- if (bts_size < 0)
- return bts_size;
- }
- cfg.size = PAGE_ALIGN(cfg.size);
+ if (cfg.flags & PTRACE_BTS_O_ALLOC) {
+ ds_ovfl_callback_t ovfl = 0;
+ unsigned int sig = 0;
+
+ /* we ignore the error in case we were not tracing the child */
+ (void)ds_release_bts(child);
+
+ if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
+ if (!cfg.signal)
+ goto errout;
+
+ sig = cfg.signal;
+ ovfl = ptrace_bts_ovfl;
+ }
- if (bts_size != cfg.size) {
- ret = ptrace_bts_realloc(child, cfg.size,
- cfg.flags & PTRACE_BTS_O_CUT_SIZE);
- if (ret < 0)
+ error = ds_request_bts(child, /* base = */ 0, cfg.size, ovfl);
+ if (error < 0)
goto errout;
- ds = (void *)child->thread.ds_area_msr;
+ child->thread.bts_ovfl_signal = sig;
}
- if (cfg.flags & PTRACE_BTS_O_SIGNAL)
- ret = ds_set_overflow(ds, DS_O_SIGNAL);
- else
- ret = ds_set_overflow(ds, DS_O_WRAP);
- if (ret < 0)
+ error = -EINVAL;
+ if (!child->thread.ds_ctx && cfg.flags)
goto errout;
if (cfg.flags & PTRACE_BTS_O_TRACE)
- child->thread.debugctlmsr |= ds_debugctl_mask();
+ child->thread.debugctlmsr |= bts_cfg.debugctl_mask;
else
- child->thread.debugctlmsr &= ~ds_debugctl_mask();
+ child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
if (cfg.flags & PTRACE_BTS_O_SCHED)
set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
else
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
- ret = sizeof(cfg);
+ error = sizeof(cfg);
out:
if (child->thread.debugctlmsr)
else
clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
- return ret;
+ return error;
errout:
- child->thread.debugctlmsr &= ~ds_debugctl_mask();
+ child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
goto out;
}
long cfg_size,
struct ptrace_bts_config __user *ucfg)
{
- void *ds = (void *)child->thread.ds_area_msr;
struct ptrace_bts_config cfg;
+ size_t end;
+ const void *base, *max;
+ int error;
if (cfg_size < sizeof(cfg))
return -EIO;
- memset(&cfg, 0, sizeof(cfg));
+ error = ds_get_bts_end(child, &end);
+ if (error < 0)
+ return error;
- if (ds) {
- cfg.size = ds_get_bts_size(ds);
+ error = ds_access_bts(child, /* index = */ 0, &base);
+ if (error < 0)
+ return error;
- if (ds_get_overflow(ds) == DS_O_SIGNAL)
- cfg.flags |= PTRACE_BTS_O_SIGNAL;
+ error = ds_access_bts(child, /* index = */ end, &max);
+ if (error < 0)
+ return error;
- if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
- child->thread.debugctlmsr & ds_debugctl_mask())
- cfg.flags |= PTRACE_BTS_O_TRACE;
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.size = (max - base);
+ cfg.signal = child->thread.bts_ovfl_signal;
+ cfg.bts_size = sizeof(struct bts_struct);
- if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
- cfg.flags |= PTRACE_BTS_O_SCHED;
- }
+ if (cfg.signal)
+ cfg.flags |= PTRACE_BTS_O_SIGNAL;
- cfg.bts_size = sizeof(struct bts_struct);
+ if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
+ child->thread.debugctlmsr & bts_cfg.debugctl_mask)
+ cfg.flags |= PTRACE_BTS_O_TRACE;
+
+ if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
+ cfg.flags |= PTRACE_BTS_O_SCHED;
if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
return -EFAULT;
return sizeof(cfg);
}
-
static int ptrace_bts_write_record(struct task_struct *child,
const struct bts_struct *in)
{
- int retval;
+ unsigned char bts_record[BTS_MAX_RECORD_SIZE];
- if (!child->thread.ds_area_msr)
- return -ENXIO;
+ BUG_ON(BTS_MAX_RECORD_SIZE < bts_cfg.sizeof_bts);
- retval = ds_write_bts((void *)child->thread.ds_area_msr, in);
- if (retval)
- return retval;
+ memset(bts_record, 0, bts_cfg.sizeof_bts);
+ switch (in->qualifier) {
+ case BTS_INVALID:
+ break;
- return sizeof(*in);
- }
+ case BTS_BRANCH:
+ bts_set(bts_record, bts_from, in->variant.lbr.from_ip);
+ bts_set(bts_record, bts_to, in->variant.lbr.to_ip);
+ break;
- static int ptrace_bts_realloc(struct task_struct *child,
- int size, int reduce_size)
- {
- unsigned long rlim, vm;
- int ret, old_size;
+ case BTS_TASK_ARRIVES:
+ case BTS_TASK_DEPARTS:
+ bts_set(bts_record, bts_from, bts_escape);
+ bts_set(bts_record, bts_qual, in->qualifier);
+ bts_set(bts_record, bts_jiffies, in->variant.jiffies);
+ break;
- if (size < 0)
+ default:
return -EINVAL;
-
- old_size = ds_get_bts_size((void *)child->thread.ds_area_msr);
- if (old_size < 0)
- return old_size;
-
- ret = ds_free((void **)&child->thread.ds_area_msr);
- if (ret < 0)
- goto out;
-
- size >>= PAGE_SHIFT;
- old_size >>= PAGE_SHIFT;
-
- current->mm->total_vm -= old_size;
- current->mm->locked_vm -= old_size;
-
- if (size == 0)
- goto out;
-
- rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
- vm = current->mm->total_vm + size;
- if (rlim < vm) {
- ret = -ENOMEM;
-
- if (!reduce_size)
- goto out;
-
- size = rlim - current->mm->total_vm;
- if (size <= 0)
- goto out;
}
- rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
- vm = current->mm->locked_vm + size;
- if (rlim < vm) {
- ret = -ENOMEM;
-
- if (!reduce_size)
- goto out;
-
- size = rlim - current->mm->locked_vm;
- if (size <= 0)
- goto out;
- }
-
- ret = ds_allocate((void **)&child->thread.ds_area_msr,
- size << PAGE_SHIFT);
- if (ret < 0)
- goto out;
-
- current->mm->total_vm += size;
- current->mm->locked_vm += size;
-
- out:
- if (child->thread.ds_area_msr)
- set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
- else
- clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
-
- return ret;
+ /* The writing task will be the switched-to task on a context
+ * switch. It needs to write into the switched-from task's BTS
+ * buffer. */
+ return ds_unchecked_write_bts(child, bts_record, bts_cfg.sizeof_bts);
}
void ptrace_bts_take_timestamp(struct task_struct *tsk,
ptrace_bts_write_record(tsk, &rec);
}
- #endif /* X86_BTS */
+
+ static const struct bts_configuration bts_cfg_netburst = {
+ .sizeof_bts = sizeof(long) * 3,
+ .sizeof_field = sizeof(long),
+ .debugctl_mask = (1<<2)|(1<<3)|(1<<5)
+ };
+
+ static const struct bts_configuration bts_cfg_pentium_m = {
+ .sizeof_bts = sizeof(long) * 3,
+ .sizeof_field = sizeof(long),
+ .debugctl_mask = (1<<6)|(1<<7)
+ };
+
+ static const struct bts_configuration bts_cfg_core2 = {
+ .sizeof_bts = 8 * 3,
+ .sizeof_field = 8,
+ .debugctl_mask = (1<<6)|(1<<7)|(1<<9)
+ };
+
+ static inline void bts_configure(const struct bts_configuration *cfg)
+ {
+ bts_cfg = *cfg;
+ }
+
+ void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
+ {
+ switch (c->x86) {
+ case 0x6:
+ switch (c->x86_model) {
+ case 0xD:
+ case 0xE: /* Pentium M */
+ bts_configure(&bts_cfg_pentium_m);
+ break;
+ case 0xF: /* Core2 */
+ case 0x1C: /* Atom */
+ bts_configure(&bts_cfg_core2);
+ break;
+ default:
+ /* sorry, don't know about them */
+ break;
+ }
+ break;
+ case 0xF:
+ switch (c->x86_model) {
+ case 0x0:
+ case 0x1:
+ case 0x2: /* Netburst */
+ bts_configure(&bts_cfg_netburst);
+ break;
+ default:
+ /* sorry, don't know about them */
+ break;
+ }
+ break;
+ default:
+ /* sorry, don't know about them */
+ break;
+ }
+ }
+ #endif /* CONFIG_X86_PTRACE_BTS */
/*
* Called by kernel/ptrace.c when detaching..
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
- if (child->thread.ds_area_msr) {
- #ifdef X86_BTS
- ptrace_bts_realloc(child, 0, 0);
- #endif
- child->thread.debugctlmsr &= ~ds_debugctl_mask();
- if (!child->thread.debugctlmsr)
- clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
- clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
- }
+ #ifdef CONFIG_X86_PTRACE_BTS
+ (void)ds_release_bts(child);
+
+ child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
+ if (!child->thread.debugctlmsr)
+ clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+
+ clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
+ #endif /* CONFIG_X86_PTRACE_BTS */
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
* These bits need more cooking - not enabled yet:
*/
- #ifdef X86_BTS
+ #ifdef CONFIG_X86_PTRACE_BTS
case PTRACE_BTS_CONFIG:
ret = ptrace_bts_config
(child, data, (struct ptrace_bts_config __user *)addr);
break;
case PTRACE_BTS_SIZE:
- ret = ptrace_bts_get_size(child);
+ ret = ds_get_bts_index(child, /* pos = */ 0);
break;
case PTRACE_BTS_GET:
break;
case PTRACE_BTS_CLEAR:
- ret = ptrace_bts_clear(child);
+ ret = ds_clear_bts(child);
break;
case PTRACE_BTS_DRAIN:
ret = ptrace_bts_drain
(child, data, (struct bts_struct __user *) addr);
break;
- #endif
+ #endif /* CONFIG_X86_PTRACE_BTS */
default:
ret = ptrace_request(child, request, addr, data);
* Debug Store (DS) support
*
* This provides a low-level interface to the hardware's Debug Store
- * feature that is used for last branch recording (LBR) and
+ * feature that is used for branch trace store (BTS) and
* precise-event based sampling (PEBS).
*
- * Different architectures use a different DS layout/pointer size.
- * The below functions therefore work on a void*.
+ * It manages:
+ * - per-thread and per-cpu allocation of BTS and PEBS
+ * - buffer memory allocation (optional)
+ * - buffer overflow handling
+ * - buffer access
*
+ * It assumes:
+ * - get_task_struct on all parameter tasks
+ * - current is allowed to trace parameter tasks
*
- * Since there is no user for PEBS, yet, only LBR (or branch
- * trace store, BTS) is supported.
*
- *
- * Copyright (C) 2007 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
+ * Copyright (C) 2007-2008 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
*/
-#ifndef _ASM_X86_DS_H
-#define _ASM_X86_DS_H
+#ifndef ASM_X86__DS_H
+#define ASM_X86__DS_H
+ #ifdef CONFIG_X86_DS
+
#include <linux/types.h>
#include <linux/init.h>
- struct cpuinfo_x86;
+ struct task_struct;
- /* a branch trace record entry
+ /*
+ * Request BTS or PEBS
+ *
+ * Due to alignment constraints, the actual buffer may be slightly
+ * smaller than the requested or provided buffer.
*
- * In order to unify the interface between various processor versions,
- * we use the below data structure for all processors.
+ * Returns 0 on success; -Eerrno otherwise
+ *
+ * task: the task to request recording for;
+ * NULL for per-cpu recording on the current cpu
+ * base: the base pointer for the (non-pageable) buffer;
+ * NULL if buffer allocation requested
+ * size: the size of the requested or provided buffer
+ * ovfl: pointer to a function to be called on buffer overflow;
+ * NULL if cyclic buffer requested
*/
- enum bts_qualifier {
- BTS_INVALID = 0,
- BTS_BRANCH,
- BTS_TASK_ARRIVES,
- BTS_TASK_DEPARTS
- };
+ typedef void (*ds_ovfl_callback_t)(struct task_struct *);
+ extern int ds_request_bts(struct task_struct *task, void *base, size_t size,
+ ds_ovfl_callback_t ovfl);
+ extern int ds_request_pebs(struct task_struct *task, void *base, size_t size,
+ ds_ovfl_callback_t ovfl);
+
+ /*
+ * Release BTS or PEBS resources
+ *
+ * Frees buffers allocated on ds_request.
+ *
+ * Returns 0 on success; -Eerrno otherwise
+ *
+ * task: the task to release resources for;
+ * NULL to release resources for the current cpu
+ */
+ extern int ds_release_bts(struct task_struct *task);
+ extern int ds_release_pebs(struct task_struct *task);
+
+ /*
+ * Return the (array) index of the write pointer.
+ * (assuming an array of BTS/PEBS records)
+ *
+ * Returns -Eerrno on error
+ *
+ * task: the task to access;
+ * NULL to access the current cpu
+ * pos (out): if not NULL, will hold the result
+ */
+ extern int ds_get_bts_index(struct task_struct *task, size_t *pos);
+ extern int ds_get_pebs_index(struct task_struct *task, size_t *pos);
+
+ /*
+ * Return the (array) index one record beyond the end of the array.
+ * (assuming an array of BTS/PEBS records)
+ *
+ * Returns -Eerrno on error
+ *
+ * task: the task to access;
+ * NULL to access the current cpu
+ * pos (out): if not NULL, will hold the result
+ */
+ extern int ds_get_bts_end(struct task_struct *task, size_t *pos);
+ extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
+
+ /*
+ * Provide a pointer to the BTS/PEBS record at parameter index.
+ * (assuming an array of BTS/PEBS records)
+ *
+ * The pointer points directly into the buffer. The user is
+ * responsible for copying the record.
+ *
+ * Returns the size of a single record on success; -Eerrno on error
+ *
+ * task: the task to access;
+ * NULL to access the current cpu
+ * index: the index of the requested record
+ * record (out): pointer to the requested record
+ */
+ extern int ds_access_bts(struct task_struct *task,
+ size_t index, const void **record);
+ extern int ds_access_pebs(struct task_struct *task,
+ size_t index, const void **record);
+
+ /*
+ * Write one or more BTS/PEBS records at the write pointer index and
+ * advance the write pointer.
+ *
+ * If size is not a multiple of the record size, trailing bytes are
+ * zeroed out.
+ *
+ * May result in one or more overflow notifications.
+ *
+ * If called during overflow handling, that is, with index >=
+ * interrupt threshold, the write will wrap around.
+ *
+ * An overflow notification is given if and when the interrupt
+ * threshold is reached during or after the write.
+ *
+ * Returns the number of bytes written or -Eerrno.
+ *
+ * task: the task to access;
+ * NULL to access the current cpu
+ * buffer: the buffer to write
+ * size: the size of the buffer
+ */
+ extern int ds_write_bts(struct task_struct *task,
+ const void *buffer, size_t size);
+ extern int ds_write_pebs(struct task_struct *task,
+ const void *buffer, size_t size);
+
+ /*
+ * Same as ds_write_bts/pebs, but omit ownership checks.
+ *
+ * This is needed to have some other task than the owner of the
+ * BTS/PEBS buffer or the parameter task itself write into the
+ * respective buffer.
+ */
+ extern int ds_unchecked_write_bts(struct task_struct *task,
+ const void *buffer, size_t size);
+ extern int ds_unchecked_write_pebs(struct task_struct *task,
+ const void *buffer, size_t size);
+
+ /*
+ * Reset the write pointer of the BTS/PEBS buffer.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ * NULL to access the current cpu
+ */
+ extern int ds_reset_bts(struct task_struct *task);
+ extern int ds_reset_pebs(struct task_struct *task);
+
+ /*
+ * Clear the BTS/PEBS buffer and reset the write pointer.
+ * The entire buffer will be zeroed out.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ * NULL to access the current cpu
+ */
+ extern int ds_clear_bts(struct task_struct *task);
+ extern int ds_clear_pebs(struct task_struct *task);
+
+ /*
+ * Provide the PEBS counter reset value.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ * NULL to access the current cpu
+ * value (out): the counter reset value
+ */
+ extern int ds_get_pebs_reset(struct task_struct *task, u64 *value);
+
+ /*
+ * Set the PEBS counter reset value.
+ *
+ * Returns 0 on success; -Eerrno on error
+ *
+ * task: the task to access;
+ * NULL to access the current cpu
+ * value: the new counter reset value
+ */
+ extern int ds_set_pebs_reset(struct task_struct *task, u64 value);
+
+ /*
+ * Initialization
+ */
+ struct cpuinfo_x86;
+ extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
+
+
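A minimal sketch of how a kernel-side client might drive the BTS half of this interface (editorial illustration, not from the patch; the function names are hypothetical and error handling is abbreviated):

    #include <linux/sched.h>
    #include <asm/ds.h>

    static void my_bts_overflow(struct task_struct *task)
    {
    	/* called by the DS layer when the BTS buffer overflows */
    }

    static int my_trace_task_bts(struct task_struct *task)
    {
    	const void *record;
    	size_t index;
    	int err;

    	/* ask the DS layer to allocate a 4 KB BTS buffer for 'task' */
    	err = ds_request_bts(task, /* base = */ NULL, 4096, my_bts_overflow);
    	if (err < 0)
    		return err;

    	/* ... let the task run with branch tracing enabled ... */

    	/* records [0, index) have been written so far */
    	err = ds_get_bts_index(task, &index);
    	if (err >= 0 && index > 0)
    		/* peek at the most recently written raw record */
    		err = ds_access_bts(task, index - 1, &record);

    	ds_release_bts(task);
    	return err;
    }

Note that ds_access_bts() hands back a pointer directly into the buffer, so the caller must copy the record before releasing the resources.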
- struct bts_struct {
- u64 qualifier;
- union {
- /* BTS_BRANCH */
- struct {
- u64 from_ip;
- u64 to_ip;
- } lbr;
- /* BTS_TASK_ARRIVES or
- BTS_TASK_DEPARTS */
- u64 jiffies;
- } variant;
+ /*
+ * The DS context - part of struct thread_struct.
+ */
+ struct ds_context {
+ /* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
+ unsigned char *ds;
+ /* the owner of the BTS and PEBS configuration, respectively */
+ struct task_struct *owner[2];
+ /* buffer overflow notification function for BTS and PEBS */
+ ds_ovfl_callback_t callback[2];
+ /* the original buffer address */
+ void *buffer[2];
+ /* the number of allocated pages for on-request allocated buffers */
+ unsigned int pages[2];
+ /* use count */
+ unsigned long count;
+ /* a pointer to the context location inside the thread_struct
+ * or the per_cpu context array */
+ struct ds_context **this;
+ /* a pointer to the task owning this context, or NULL, if the
+ * context is owned by a cpu */
+ struct task_struct *task;
};
- /* Overflow handling mechanisms */
- #define DS_O_SIGNAL 1 /* send overflow signal */
- #define DS_O_WRAP 2 /* wrap around */
-
- extern int ds_allocate(void **, size_t);
- extern int ds_free(void **);
- extern int ds_get_bts_size(void *);
- extern int ds_get_bts_end(void *);
- extern int ds_get_bts_index(void *);
- extern int ds_set_overflow(void *, int);
- extern int ds_get_overflow(void *);
- extern int ds_clear(void *);
- extern int ds_read_bts(void *, int, struct bts_struct *);
- extern int ds_write_bts(void *, const struct bts_struct *);
- extern unsigned long ds_debugctl_mask(void);
- extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c);
+ /* called by exit_thread() to free leftover contexts */
+ extern void ds_free(struct ds_context *context);
+
+ #else /* CONFIG_X86_DS */
+
+ #define ds_init_intel(config) do {} while (0)
-#endif /* _ASM_X86_DS_H */
+ #endif /* CONFIG_X86_DS */
+#endif /* ASM_X86__DS_H */
-#ifndef __ASM_X86_PROCESSOR_H
-#define __ASM_X86_PROCESSOR_H
+#ifndef ASM_X86__PROCESSOR_H
+#define ASM_X86__PROCESSOR_H
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
+ #include <asm/ds.h>
#include <linux/personality.h>
#include <linux/cpumask.h>
int x86_tlbsize;
__u8 x86_virt_bits;
__u8 x86_phys_bits;
+#endif
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
/* Max extended CPUID function supported: */
__u32 extended_cpuid_level;
-#endif
/* Maximum supported CPUID level, -1=no CPUID: */
int cpuid_level;
__u32 x86_capability[NCAPINTS];
#define current_cpu_data boot_cpu_data
#endif
+extern const struct seq_operations cpuinfo_op;
+
static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
extern void cpu_detect(struct cpuinfo_x86 *c);
+extern struct pt_regs *idle_regs(struct pt_regs *);
+
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
-#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
+extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
-#else
-static inline void detect_ht(struct cpuinfo_x86 *c) {}
-#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
/* 16*16 bytes for each XMM-reg = 256 bytes: */
u32 xmm_space[64];
- u32 padding[24];
+ u32 padding[12];
+
+ union {
+ u32 padding1[12];
+ u32 sw_reserved[12];
+ };
} __attribute__((aligned(16)));
u32 entry_eip;
};
+struct xsave_hdr_struct {
+ u64 xstate_bv;
+ u64 reserved1[2];
+ u64 reserved2[5];
+} __attribute__((packed));
+
+struct xsave_struct {
+ struct i387_fxsave_struct i387;
+ struct xsave_hdr_struct xsave_hdr;
+ /* new processor state extensions will go here */
+} __attribute__ ((packed, aligned (64)));
+
union thread_xstate {
struct i387_fsave_struct fsave;
struct i387_fxsave_struct fxsave;
struct i387_soft_struct soft;
+ struct xsave_struct xsave;
};
#ifdef CONFIG_X86_64
unsigned io_bitmap_max;
/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
unsigned long debugctlmsr;
- /* Debug Store - if not 0 points to a DS Save Area configuration;
- * goes into MSR_IA32_DS_AREA */
- unsigned long ds_area_msr;
+ #ifdef CONFIG_X86_DS
+ /* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
+ struct ds_context *ds_ctx;
+ #endif /* CONFIG_X86_DS */
+ #ifdef CONFIG_X86_PTRACE_BTS
+ /* the signal to send on a bts buffer overflow */
+ unsigned int bts_ovfl_signal;
+ #endif /* CONFIG_X86_PTRACE_BTS */
};
static inline unsigned long native_get_debugreg(int regno)
extern unsigned long idle_halt;
extern unsigned long idle_nomwait;
+/*
+ * On systems with caches, caches must be flushed as the absolute
+ * last instruction before going into a suspended halt. Otherwise,
+ * dirty data can linger in the cache and become stale on resume,
+ * leading to strange errors.
+ *
+ * perform a variety of operations to guarantee that the compiler
+ * will not reorder instructions. wbinvd itself is serializing
+ * so the processor will not reorder.
+ *
+ * Systems without cache can just go into halt.
+ */
+static inline void wbinvd_halt(void)
+{
+ mb();
+ /* check for clflush to determine if wbinvd is legal */
+ if (cpu_has_clflush)
+ asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
+ else
+ while (1)
+ halt();
+}
+
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
-#endif
+#endif /* ASM_X86__PROCESSOR_H */
-#ifndef _ASM_X86_PTRACE_ABI_H
-#define _ASM_X86_PTRACE_ABI_H
+#ifndef ASM_X86__PTRACE_ABI_H
+#define ASM_X86__PTRACE_ABI_H
#ifdef __i386__
#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
- #ifndef __ASSEMBLY__
+ #ifdef CONFIG_X86_PTRACE_BTS
+ #ifndef __ASSEMBLY__
#include <asm/types.h>
/* configuration/status structure used in PTRACE_BTS_CONFIG and
/* actual size of bts_struct in bytes */
__u32 bts_size;
};
- #endif
+ #endif /* __ASSEMBLY__ */
#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */
#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */
#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow
instead of wrapping around */
- #define PTRACE_BTS_O_CUT_SIZE 0x8 /* cut requested size to max available
- instead of failing */
+ #define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */
#define PTRACE_BTS_CONFIG 40
/* Configure branch trace recording.
ADDR points to a struct ptrace_bts_config.
DATA gives the size of that buffer.
- A new buffer is allocated, iff the size changes.
+ A new buffer is allocated if requested in the flags.
+ An overflow signal may only be requested for new buffers.
Returns the number of bytes read.
*/
#define PTRACE_BTS_STATUS 41
Returns the number of bytes written.
*/
#define PTRACE_BTS_SIZE 42
- /* Return the number of available BTS records.
+ /* Return the number of available BTS records for draining.
DATA and ADDR are ignored.
*/
#define PTRACE_BTS_GET 43
BTS records are read from oldest to newest.
Returns number of BTS records drained.
*/
+ #endif /* CONFIG_X86_PTRACE_BTS */
-#endif
+#endif /* ASM_X86__PTRACE_ABI_H */
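Correspondingly, a user-space debugger could exercise the new ptrace requests roughly as sketched below (editorial illustration; it assumes the PTRACE_BTS_* definitions, struct ptrace_bts_config and struct bts_struct from the patched headers are visible, follows the ADDR-points-to-struct convention documented for PTRACE_BTS_CONFIG with DATA carrying the size or record index, and omits error handling):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    static void dump_branch_trace(pid_t pid)
    {
    	struct ptrace_bts_config cfg;
    	struct bts_struct rec;
    	long avail, i;

    	memset(&cfg, 0, sizeof(cfg));
    	cfg.size  = 4096;                    /* requested BTS buffer size */
    	cfg.flags = PTRACE_BTS_O_ALLOC |     /* (re)allocate the buffer ...   */
    	            PTRACE_BTS_O_TRACE;      /* ... and enable branch tracing */
    	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, (void *)sizeof(cfg));

    	/* ... resume the child and wait for it to stop again ... */

    	avail = ptrace(PTRACE_BTS_SIZE, pid, NULL, NULL);
    	for (i = 0; i < avail; i++) {
    		/* index 0 is the most recent record */
    		ptrace(PTRACE_BTS_GET, pid, &rec, (void *)i);
    		if (rec.qualifier == BTS_BRANCH)
    			printf("%llx -> %llx\n",
    			       (unsigned long long)rec.variant.lbr.from_ip,
    			       (unsigned long long)rec.variant.lbr.to_ip);
    	}
    }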
-#ifndef _ASM_X86_PTRACE_H
-#define _ASM_X86_PTRACE_H
+#ifndef ASM_X86__PTRACE_H
+#define ASM_X86__PTRACE_H
#include <linux/compiler.h> /* For __user */
#include <asm/ptrace-abi.h>
#endif /* __KERNEL__ */
#endif /* !__i386__ */
+
+ #ifdef CONFIG_X86_PTRACE_BTS
+ /* a branch trace record entry
+ *
+ * In order to unify the interface between various processor versions,
+ * we use the below data structure for all processors.
+ */
+ enum bts_qualifier {
+ BTS_INVALID = 0,
+ BTS_BRANCH,
+ BTS_TASK_ARRIVES,
+ BTS_TASK_DEPARTS
+ };
+
+ struct bts_struct {
+ __u64 qualifier;
+ union {
+ /* BTS_BRANCH */
+ struct {
+ __u64 from_ip;
+ __u64 to_ip;
+ } lbr;
+ /* BTS_TASK_ARRIVES or
+ BTS_TASK_DEPARTS */
+ __u64 jiffies;
+ } variant;
+ };
+ #endif /* CONFIG_X86_PTRACE_BTS */
+
#ifdef __KERNEL__
- /* the DS BTS struct is used for ptrace as well */
- #include <asm/ds.h>
+ #include <linux/init.h>
+ struct cpuinfo_x86;
struct task_struct;
+ #ifdef CONFIG_X86_PTRACE_BTS
+ extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
+ #else
+ #define ptrace_bts_init_intel(config) do {} while (0)
+ #endif /* CONFIG_X86_PTRACE_BTS */
extern unsigned long profile_pc(struct pt_regs *regs);
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
#endif
+extern long syscall_trace_enter(struct pt_regs *);
+extern void syscall_trace_leave(struct pt_regs *);
+
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
#endif /* !__ASSEMBLY__ */
-#endif
+#endif /* ASM_X86__PTRACE_H */