xen/trace: add mmu tracepoints
author     Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
           Fri, 17 Dec 2010 01:02:35 +0000 (17:02 -0800)
committer  Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
           Mon, 18 Jul 2011 22:43:27 +0000 (15:43 -0700)
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
arch/x86/xen/mmu.c
include/trace/events/xen.h
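
The TP_printk() format strings in the include/trace/events/xen.h hunk below all rely on the "%0*llx" width idiom, which zero-pads the printed value to two hex digits per byte of the underlying type. A minimal standalone userspace sketch of that idiom (the value is illustrative only, not taken from the commit):

    /* Zero-pad to the full width of the type: sizeof(type) * 2 hex digits. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t pteval = 0x12345067ULL;        /* made-up raw PTE value */

            printf("pteval %0*llx\n",
                   (int)sizeof(pteval) * 2, (unsigned long long)pteval);
            return 0;       /* prints: pteval 0000000012345067 */
    }

This mirrors how the new tracepoints print both the canonical value (via pte_val()/pmd_val()/pud_val()/pgd_val()) and the raw machine value at the same fixed width.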

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0ccccb67a99300d3cd6277cb427382bdbe3ce3a5..43fa7771ccb9e475491f5b29c2424653393ac758 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -48,6 +48,8 @@
 #include <linux/memblock.h>
 #include <linux/seq_file.h>
 
+#include <trace/events/xen.h>
+
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -194,6 +196,8 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
        struct multicall_space mcs;
        struct mmu_update *u;
 
+       trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
+
        mcs = xen_mc_entry(sizeof(*u));
        u = mcs.args;
 
@@ -245,6 +249,8 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 
 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
+       trace_xen_mmu_set_pmd(ptr, val);
+
        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
@@ -282,22 +288,30 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
        return true;
 }
 
-static void xen_set_pte(pte_t *ptep, pte_t pteval)
+static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
 {
        if (!xen_batched_set_pte(ptep, pteval))
                native_set_pte(ptep, pteval);
 }
 
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+       trace_xen_mmu_set_pte(ptep, pteval);
+       __xen_set_pte(ptep, pteval);
+}
+
 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
 {
-       xen_set_pte(ptep, pteval);
+       trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
+       __xen_set_pte(ptep, pteval);
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep)
 {
        /* Just return the pte as-is.  We preserve the bits on commit */
+       trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
        return *ptep;
 }
 
@@ -306,6 +320,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 {
        struct mmu_update u;
 
+       trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
        xen_mc_batch();
 
        u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
@@ -530,6 +545,8 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 
 static void xen_set_pud(pud_t *ptr, pud_t val)
 {
+       trace_xen_mmu_set_pud(ptr, val);
+
        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
@@ -543,17 +560,20 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
 #ifdef CONFIG_X86_PAE
 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
+       trace_xen_mmu_set_pte_atomic(ptep, pte);
        set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+       trace_xen_mmu_pte_clear(mm, addr, ptep);
        if (!xen_batched_set_pte(ptep, native_make_pte(0)))
                native_pte_clear(mm, addr, ptep);
 }
 
 static void xen_pmd_clear(pmd_t *pmdp)
 {
+       trace_xen_mmu_pmd_clear(pmdp);
        set_pmd(pmdp, __pmd(0));
 }
 #endif /* CONFIG_X86_PAE */
@@ -629,6 +649,8 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
        pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
+       trace_xen_mmu_set_pgd(ptr, user_ptr, val);
+
        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 330848269bc126bd3797768512d0c93f5108cc5d..08089e82f77eed47cd8be04c77f63e73a367d769 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -125,6 +125,229 @@ TRACE_EVENT(xen_mc_extend_args,
                      __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" :
                      __entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
        );
+
+/* mmu */
+TRACE_EVENT(xen_mmu_set_pte,
+           TP_PROTO(pte_t *ptep, pte_t pteval),
+           TP_ARGS(ptep, pteval),
+           TP_STRUCT__entry(
+                   __field(pte_t *, ptep)
+                   __field(pteval_t, pteval)
+                   ),
+           TP_fast_assign(__entry->ptep = ptep;
+                          __entry->pteval = pteval.pte),
+           TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
+                     __entry->ptep,
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+       );
+
+TRACE_EVENT(xen_mmu_set_pte_atomic,
+           TP_PROTO(pte_t *ptep, pte_t pteval),
+           TP_ARGS(ptep, pteval),
+           TP_STRUCT__entry(
+                   __field(pte_t *, ptep)
+                   __field(pteval_t, pteval)
+                   ),
+           TP_fast_assign(__entry->ptep = ptep;
+                          __entry->pteval = pteval.pte),
+           TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
+                     __entry->ptep,
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+       );
+
+TRACE_EVENT(xen_mmu_set_domain_pte,
+           TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
+           TP_ARGS(ptep, pteval, domid),
+           TP_STRUCT__entry(
+                   __field(pte_t *, ptep)
+                   __field(pteval_t, pteval)
+                   __field(unsigned, domid)
+                   ),
+           TP_fast_assign(__entry->ptep = ptep;
+                          __entry->pteval = pteval.pte;
+                          __entry->domid = domid),
+           TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
+                     __entry->ptep,
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
+                     __entry->domid)
+       );
+
+TRACE_EVENT(xen_mmu_set_pte_at,
+           TP_PROTO(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t pteval),
+           TP_ARGS(mm, addr, ptep, pteval),
+           TP_STRUCT__entry(
+                   __field(struct mm_struct *, mm)
+                   __field(unsigned long, addr)
+                   __field(pte_t *, ptep)
+                   __field(pteval_t, pteval)
+                   ),
+           TP_fast_assign(__entry->mm = mm;
+                          __entry->addr = addr;
+                          __entry->ptep = ptep;
+                          __entry->pteval = pteval.pte),
+           TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
+                     __entry->mm, __entry->addr, __entry->ptep,
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+       );
+
+TRACE_EVENT(xen_mmu_pte_clear,
+           TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
+           TP_ARGS(mm, addr, ptep),
+           TP_STRUCT__entry(
+                   __field(struct mm_struct *, mm)
+                   __field(unsigned long, addr)
+                   __field(pte_t *, ptep)
+                   ),
+           TP_fast_assign(__entry->mm = mm;
+                          __entry->addr = addr;
+                          __entry->ptep = ptep),
+           TP_printk("mm %p addr %lx ptep %p",
+                     __entry->mm, __entry->addr, __entry->ptep)
+       );
+
+TRACE_EVENT(xen_mmu_set_pmd,
+           TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
+           TP_ARGS(pmdp, pmdval),
+           TP_STRUCT__entry(
+                   __field(pmd_t *, pmdp)
+                   __field(pmdval_t, pmdval)
+                   ),
+           TP_fast_assign(__entry->pmdp = pmdp;
+                          __entry->pmdval = pmdval.pmd),
+           TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)",
+                     __entry->pmdp,
+                     (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)),
+                     (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
+       );
+
+TRACE_EVENT(xen_mmu_pmd_clear,
+           TP_PROTO(pmd_t *pmdp),
+           TP_ARGS(pmdp),
+           TP_STRUCT__entry(
+                   __field(pmd_t *, pmdp)
+                   ),
+           TP_fast_assign(__entry->pmdp = pmdp),
+           TP_printk("pmdp %p", __entry->pmdp)
+       );
+
+#if PAGETABLE_LEVELS >= 4
+
+TRACE_EVENT(xen_mmu_set_pud,
+           TP_PROTO(pud_t *pudp, pud_t pudval),
+           TP_ARGS(pudp, pudval),
+           TP_STRUCT__entry(
+                   __field(pud_t *, pudp)
+                   __field(pudval_t, pudval)
+                   ),
+           TP_fast_assign(__entry->pudp = pudp;
+                          __entry->pudval = native_pud_val(pudval)),
+           TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
+                     __entry->pudp,
+                     (int)sizeof(pudval_t) * 2, (unsigned long long)pud_val(native_make_pud(__entry->pudval)),
+                     (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
+       );
+
+TRACE_EVENT(xen_mmu_set_pgd,
+           TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval),
+           TP_ARGS(pgdp, user_pgdp, pgdval),
+           TP_STRUCT__entry(
+                   __field(pgd_t *, pgdp)
+                   __field(pgd_t *, user_pgdp)
+                   __field(pgdval_t, pgdval)
+                   ),
+           TP_fast_assign(__entry->pgdp = pgdp;
+                          __entry->user_pgdp = user_pgdp;
+                          __entry->pgdval = pgdval.pgd),
+           TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)",
+                     __entry->pgdp, __entry->user_pgdp,
+                     (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)),
+                     (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval)
+       );
+
+TRACE_EVENT(xen_mmu_pud_clear,
+           TP_PROTO(pud_t *pudp),
+           TP_ARGS(pudp),
+           TP_STRUCT__entry(
+                   __field(pud_t *, pudp)
+                   ),
+           TP_fast_assign(__entry->pudp = pudp),
+           TP_printk("pudp %p", __entry->pudp)
+       );
+#else
+
+TRACE_EVENT(xen_mmu_set_pud,
+           TP_PROTO(pud_t *pudp, pud_t pudval),
+           TP_ARGS(pudp, pudval),
+           TP_STRUCT__entry(
+                   __field(pud_t *, pudp)
+                   __field(pudval_t, pudval)
+                   ),
+           TP_fast_assign(__entry->pudp = pudp;
+                          __entry->pudval = native_pud_val(pudval)),
+           TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
+                     __entry->pudp,
+                     (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)),
+                     (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
+       );
+
+#endif
+
+TRACE_EVENT(xen_mmu_pgd_clear,
+           TP_PROTO(pgd_t *pgdp),
+           TP_ARGS(pgdp),
+           TP_STRUCT__entry(
+                   __field(pgd_t *, pgdp)
+                   ),
+           TP_fast_assign(__entry->pgdp = pgdp),
+           TP_printk("pgdp %p", __entry->pgdp)
+       );
+
+TRACE_EVENT(xen_mmu_ptep_modify_prot_start,
+           TP_PROTO(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t pteval),
+           TP_ARGS(mm, addr, ptep, pteval),
+           TP_STRUCT__entry(
+                   __field(struct mm_struct *, mm)
+                   __field(unsigned long, addr)
+                   __field(pte_t *, ptep)
+                   __field(pteval_t, pteval)
+                   ),
+           TP_fast_assign(__entry->mm = mm;
+                          __entry->addr = addr;
+                          __entry->ptep = ptep;
+                          __entry->pteval = pteval.pte),
+           TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
+                     __entry->mm, __entry->addr, __entry->ptep,
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+       );
+
+TRACE_EVENT(xen_mmu_ptep_modify_prot_commit,
+           TP_PROTO(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t pteval),
+           TP_ARGS(mm, addr, ptep, pteval),
+           TP_STRUCT__entry(
+                   __field(struct mm_struct *, mm)
+                   __field(unsigned long, addr)
+                   __field(pte_t *, ptep)
+                   __field(pteval_t, pteval)
+                   ),
+           TP_fast_assign(__entry->mm = mm;
+                          __entry->addr = addr;
+                          __entry->ptep = ptep;
+                          __entry->pteval = pteval.pte),
+           TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
+                     __entry->mm, __entry->addr, __entry->ptep,
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+                     (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+       );
+
+
 #endif /*  _TRACE_XEN_H */
 
 /* This part must be outside protection */
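
Each TRACE_EVENT() above also generates register_trace_<name>()/unregister_trace_<name>() helpers, so code outside the trace machinery can attach a probe to the new events. A minimal sketch of a throwaway module hooking xen_mmu_set_pte; the module and probe names are illustrative, and it assumes an x86 kernel of this era built with Xen and tracepoint support:

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <asm/pgtable.h>
    #include <trace/events/xen.h>

    /* Probe signature: a void *data pointer first, then the TP_PROTO() arguments. */
    static void probe_set_pte(void *data, pte_t *ptep, pte_t pteval)
    {
            /* Runs from the tracepoint call site; keep the work minimal. */
            pr_debug("xen_mmu_set_pte: ptep=%p val=%llx\n",
                     ptep, (unsigned long long)pte_val(pteval));
    }

    static int __init hook_init(void)
    {
            return register_trace_xen_mmu_set_pte(probe_set_pte, NULL);
    }

    static void __exit hook_exit(void)
    {
            unregister_trace_xen_mmu_set_pte(probe_set_pte, NULL);
            tracepoint_synchronize_unregister();
    }

    module_init(hook_init);
    module_exit(hook_exit);
    MODULE_LICENSE("GPL");

In normal use the events are simply enabled through the ftrace/perf interfaces (the xen:xen_mmu_* events); the module is only meant to show the call-site/probe relationship that the trace_xen_mmu_*() calls in mmu.c hook into.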