2 #define TRACE_SYSTEM compaction
4 #if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_COMPACTION_H
7 #include <linux/types.h>
8 #include <linux/list.h>
9 #include <linux/tracepoint.h>
10 #include <trace/events/mmflags.h>
/*
 * Table mapping each compaction status value to the string shown in
 * trace output.  EM() emits a regular entry, EMe() the final entry
 * (the two macros are (re)defined below, first for TRACE_DEFINE_ENUM
 * export and then as { value, name } pairs for __print_symbolic()).
 */
12 #define COMPACTION_STATUS \
13 EM( COMPACT_SKIPPED, "skipped") \
14 EM( COMPACT_DEFERRED, "deferred") \
15 EM( COMPACT_CONTINUE, "continue") \
16 EM( COMPACT_PARTIAL, "partial") \
17 EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
18 EM( COMPACT_COMPLETE, "complete") \
19 EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
20 EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
21 EMe(COMPACT_CONTENDED, "contended")
/*
 * Helpers that keep a zone entry in the ZONE_TYPE name table only when
 * the corresponding zone is configured; otherwise the entry expands to
 * nothing.
 * NOTE(review): this extract is missing lines -- the #else/#endif of
 * each conditional and the "#define ZONE_TYPE" opener of the table
 * below are not visible.  Confirm against the full file before editing.
 */
23 #ifdef CONFIG_ZONE_DMA
24 #define IFDEF_ZONE_DMA(X) X
26 #define IFDEF_ZONE_DMA(X)
29 #ifdef CONFIG_ZONE_DMA32
30 #define IFDEF_ZONE_DMA32(X) X
32 #define IFDEF_ZONE_DMA32(X)
36 #define IFDEF_ZONE_HIGHMEM(X) X
38 #define IFDEF_ZONE_HIGHMEM(X)
42 IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \
43 IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \
44 EM (ZONE_NORMAL, "Normal") \
45 IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \
46 EMe(ZONE_MOVABLE,"Movable")
/*
 * EM()/EMe() are defined twice: first expanding to TRACE_DEFINE_ENUM()
 * so the enum values are exported to userspace, then as { value, name }
 * initializers for the __print_symbolic() tables.
 * NOTE(review): the surrounding comment delimiters and the intervening
 * "#undef EM" / "#undef EMe" lines are missing from this extract.
 */
49 * First define the enums in the above macros to be exported to userspace
50 * via TRACE_DEFINE_ENUM().
54 #define EM(a, b) TRACE_DEFINE_ENUM(a);
55 #define EMe(a, b) TRACE_DEFINE_ENUM(a);
61 * Now redefine the EM() and EMe() macros to map the enums to the strings
62 * that will be printed in the output.
66 #define EM(a, b) {a, b},
67 #define EMe(a, b) {a, b}
/*
 * Template shared by the page-isolation tracepoints: records the PFN
 * range that was scanned plus how many pages were scanned and how many
 * were actually taken (isolated).
 * NOTE(review): interior lines (TP_PROTO/TP_STRUCT__entry/TP_fast_assign
 * markers and the tail of the TP_printk argument list) are missing from
 * this extract -- do not edit this invocation without the full file.
 */
69 DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
72 unsigned long start_pfn,
73 unsigned long end_pfn,
74 unsigned long nr_scanned,
75 unsigned long nr_taken),
77 TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
80 __field(unsigned long, start_pfn)
81 __field(unsigned long, end_pfn)
82 __field(unsigned long, nr_scanned)
83 __field(unsigned long, nr_taken)
87 __entry->start_pfn = start_pfn;
88 __entry->end_pfn = end_pfn;
89 __entry->nr_scanned = nr_scanned;
90 __entry->nr_taken = nr_taken;
93 TP_printk("range=(0x%lx ~ 0x%lx) nr_scanned=%lu nr_taken=%lu",
/*
 * Instance of the isolate template fired when isolating pages that will
 * be migrated.  NOTE(review): the TP_PROTO( opener and the closing of
 * this DEFINE_EVENT are outside this extract.
 */
100 DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
103 unsigned long start_pfn,
104 unsigned long end_pfn,
105 unsigned long nr_scanned,
106 unsigned long nr_taken),
108 TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
/*
 * Instance of the isolate template fired when isolating free pages used
 * as migration targets.  NOTE(review): the TP_PROTO( opener and the
 * closing of this DEFINE_EVENT are outside this extract.
 */
111 DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
114 unsigned long start_pfn,
115 unsigned long end_pfn,
116 unsigned long nr_scanned,
117 unsigned long nr_taken),
119 TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
/*
 * Fired after migrate_pages(): derives migrated vs. failed counts.
 * The visible assign logic treats a non-negative migrate_rc as the
 * number of pages that failed migration, then walks the leftover
 * migratepages list (loop body not visible in this extract) to count
 * pages still queued; nr_migrated = nr_all - nr_failed.
 * NOTE(review): several interior lines (the migrate_rc parameter
 * declaration, the "if" guarding nr_failed, the list walk body, and the
 * closing of TP_printk) are missing from this extract.
 */
122 TRACE_EVENT(mm_compaction_migratepages,
124 TP_PROTO(unsigned long nr_all,
126 struct list_head *migratepages),
128 TP_ARGS(nr_all, migrate_rc, migratepages),
131 __field(unsigned long, nr_migrated)
132 __field(unsigned long, nr_failed)
136 unsigned long nr_failed = 0;
137 struct list_head *page_lru;
140 * migrate_pages() returns either a non-negative number
141 * with the number of pages that failed migration, or an
142 * error code, in which case we need to count the remaining
146 nr_failed = migrate_rc;
148 list_for_each(page_lru, migratepages)
151 __entry->nr_migrated = nr_all - nr_failed;
152 __entry->nr_failed = nr_failed;
155 TP_printk("nr_migrated=%lu nr_failed=%lu",
156 __entry->nr_migrated,
/*
 * Marks the start of a compaction run: the zone's PFN bounds, the
 * initial migrate/free scanner positions, and whether the run is
 * synchronous ("sync") or asynchronous ("async").
 * NOTE(review): the __field(bool, sync) line and parts of the TP_printk
 * argument list are missing from this extract.
 */
160 TRACE_EVENT(mm_compaction_begin,
161 TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
162 unsigned long free_pfn, unsigned long zone_end, bool sync),
164 TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync),
167 __field(unsigned long, zone_start)
168 __field(unsigned long, migrate_pfn)
169 __field(unsigned long, free_pfn)
170 __field(unsigned long, zone_end)
175 __entry->zone_start = zone_start;
176 __entry->migrate_pfn = migrate_pfn;
177 __entry->free_pfn = free_pfn;
178 __entry->zone_end = zone_end;
179 __entry->sync = sync;
182 TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s",
184 __entry->migrate_pfn,
187 __entry->sync ? "sync" : "async")
/*
 * Marks the end of a compaction run: same positional data as
 * mm_compaction_begin plus the final status, printed symbolically via
 * the COMPACTION_STATUS table defined above.
 * NOTE(review): the status parameter declaration, the sync/status
 * __field lines and parts of the TP_printk argument list are missing
 * from this extract.
 */
190 TRACE_EVENT(mm_compaction_end,
191 TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
192 unsigned long free_pfn, unsigned long zone_end, bool sync,
195 TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync, status),
198 __field(unsigned long, zone_start)
199 __field(unsigned long, migrate_pfn)
200 __field(unsigned long, free_pfn)
201 __field(unsigned long, zone_end)
207 __entry->zone_start = zone_start;
208 __entry->migrate_pfn = migrate_pfn;
209 __entry->free_pfn = free_pfn;
210 __entry->zone_end = zone_end;
211 __entry->sync = sync;
212 __entry->status = status;
215 TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s status=%s",
217 __entry->migrate_pfn,
220 __entry->sync ? "sync" : "async",
221 __print_symbolic(__entry->status, COMPACTION_STATUS))
/*
 * Entry trace for direct compaction: records the allocation order, the
 * gfp_mask of the triggering allocation, and the compaction priority.
 * NOTE(review): the TP_PROTO line, the order/prio __field lines and the
 * TP_printk argument list are missing from this extract.
 */
224 TRACE_EVENT(mm_compaction_try_to_compact_pages,
231 TP_ARGS(order, gfp_mask, prio),
235 __field(gfp_t, gfp_mask)
240 __entry->order = order;
241 __entry->gfp_mask = gfp_mask;
242 __entry->prio = prio;
245 TP_printk("order=%d gfp_mask=0x%x priority=%d",
/*
 * Template for zone-suitability tracepoints: records the zone's node id
 * and zone index (printed via the ZONE_TYPE table), the allocation
 * order, and a compact_result status printed via COMPACTION_STATUS.
 * NOTE(review): the order/ret parameter and __field lines, the ret
 * assignment, and parts of the TP_printk argument list are missing from
 * this extract.
 */
251 DECLARE_EVENT_CLASS(mm_compaction_suitable_template,
253 TP_PROTO(struct zone *zone,
257 TP_ARGS(zone, order, ret),
261 __field(enum zone_type, idx)
267 __entry->nid = zone_to_nid(zone);
268 __entry->idx = zone_idx(zone);
269 __entry->order = order;
273 TP_printk("node=%d zone=%-8s order=%d ret=%s",
275 __print_symbolic(__entry->idx, ZONE_TYPE),
277 __print_symbolic(__entry->ret, COMPACTION_STATUS))
/*
 * Instances of the suitability template: mm_compaction_finished and
 * mm_compaction_suitable.  NOTE(review): the order/ret parameter lines
 * and the closing of each DEFINE_EVENT are outside this extract.
 */
280 DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_finished,
282 TP_PROTO(struct zone *zone,
286 TP_ARGS(zone, order, ret)
289 DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable,
291 TP_PROTO(struct zone *zone,
295 TP_ARGS(zone, order, ret)
/*
 * Template for the compaction-deferral tracepoints (only built when
 * CONFIG_COMPACTION is enabled).  Snapshots the zone's deferral state:
 * compact_considered, compact_defer_shift (printed as the derived limit
 * 1UL << defer_shift) and compact_order_failed, alongside node/zone/order.
 * NOTE(review): the order __field line, the nid field declaration, and
 * parts of the TP_printk argument list are missing from this extract.
 */
298 #ifdef CONFIG_COMPACTION
299 DECLARE_EVENT_CLASS(mm_compaction_defer_template,
301 TP_PROTO(struct zone *zone, int order),
303 TP_ARGS(zone, order),
307 __field(enum zone_type, idx)
309 __field(unsigned int, considered)
310 __field(unsigned int, defer_shift)
311 __field(int, order_failed)
315 __entry->nid = zone_to_nid(zone);
316 __entry->idx = zone_idx(zone);
317 __entry->order = order;
318 __entry->considered = zone->compact_considered;
319 __entry->defer_shift = zone->compact_defer_shift;
320 __entry->order_failed = zone->compact_order_failed;
323 TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu",
325 __print_symbolic(__entry->idx, ZONE_TYPE),
327 __entry->order_failed,
329 1UL << __entry->defer_shift)
/*
 * Instances of the deferral template: deferred, defer_compaction and
 * defer_reset.  NOTE(review): the TP_ARGS line and closing of each
 * DEFINE_EVENT are outside this extract.
 */
332 DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_deferred,
334 TP_PROTO(struct zone *zone, int order),
339 DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_compaction,
341 TP_PROTO(struct zone *zone, int order),
346 DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
348 TP_PROTO(struct zone *zone, int order),
/*
 * Fired when kcompactd goes to sleep; the visible output records only
 * the node id.  NOTE(review): the TP_PROTO/TP_ARGS/field/assign lines
 * of this event are missing from this extract.
 */
354 TRACE_EVENT(mm_compaction_kcompactd_sleep,
368 TP_printk("nid=%d", __entry->nid)
/*
 * Template for kcompactd wakeup tracepoints: node id, allocation order,
 * and the classzone index (printed via the ZONE_TYPE table).
 * NOTE(review): the nid/order __field lines, the nid assignment, and
 * part of the TP_printk argument list are missing from this extract.
 */
371 DECLARE_EVENT_CLASS(kcompactd_wake_template,
373 TP_PROTO(int nid, int order, enum zone_type classzone_idx),
375 TP_ARGS(nid, order, classzone_idx),
380 __field(enum zone_type, classzone_idx)
385 __entry->order = order;
386 __entry->classzone_idx = classzone_idx;
389 TP_printk("nid=%d order=%d classzone_idx=%-8s",
392 __print_symbolic(__entry->classzone_idx, ZONE_TYPE))
/*
 * Instances of the wakeup template: wakeup_kcompactd (requesting a wake)
 * and kcompactd_wake (kcompactd actually waking).  NOTE(review): the
 * closing of each DEFINE_EVENT is outside this extract.
 */
395 DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd,
397 TP_PROTO(int nid, int order, enum zone_type classzone_idx),
399 TP_ARGS(nid, order, classzone_idx)
402 DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake,
404 TP_PROTO(int nid, int order, enum zone_type classzone_idx),
406 TP_ARGS(nid, order, classzone_idx)
409 #endif /* _TRACE_COMPACTION_H */
411 /* This part must be outside protection */
412 #include <trace/define_trace.h>