/*
 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
 *
 * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
 * Author : Chanwoo Choi <cw00.choi@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
 */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include <linux/devfreq-event.h>

#include "exynos-ppmu.h"
26 struct exynos_ppmu_data {
32 struct devfreq_event_dev **edev;
33 struct devfreq_event_desc *desc;
34 unsigned int num_events;
39 struct exynos_ppmu_data ppmu;
/*
 * PPMU_EVENT(x) expands to four ppmu_events[] entries, one per hardware
 * performance counter (PMNCNT0..PMNCNT3) of the PPMU node named "x".
 * The counter index doubles as the event id looked up by
 * exynos_ppmu_find_ppmu_id().
 */
#define PPMU_EVENT(name)			\
	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
48 struct __exynos_ppmu_events {
52 /* For Exynos3250, Exynos4 and Exynos5260 */
56 /* For Exynos4 SoCs and Exynos3250 */
65 /* Only for Exynos3250 and Exynos5260 */
68 /* Only for Exynos4 SoCs */
70 PPMU_EVENT(mfc-right),
72 /* Only for Exynos5260 SoCs */
86 /* Only for Exynos5433 SoCs */
88 PPMU_EVENT(d0-general),
91 PPMU_EVENT(d1-general),
97 static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
101 for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
102 if (!strcmp(edev->desc->name, ppmu_events[i].name))
103 return ppmu_events[i].id;
/*
 * The devfreq-event ops structure for PPMU v1.1
 */
111 static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
113 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
116 /* Disable all counters */
117 __raw_writel(PPMU_CCNT_MASK |
122 info->ppmu.base + PPMU_CNTENC);
125 pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
126 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
127 __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
132 static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
134 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
135 int id = exynos_ppmu_find_ppmu_id(edev);
141 /* Enable specific counter */
142 cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
143 cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
144 __raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);
146 /* Set the event of Read/Write data count */
147 __raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
148 info->ppmu.base + PPMU_BEVTxSEL(id));
150 /* Reset cycle counter/performance counter and enable PPMU */
151 pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
152 pmnc &= ~(PPMU_PMNC_ENABLE_MASK
153 | PPMU_PMNC_COUNTER_RESET_MASK
154 | PPMU_PMNC_CC_RESET_MASK);
155 pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
156 pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
157 pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
158 __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
163 static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
164 struct devfreq_event_data *edata)
166 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
167 int id = exynos_ppmu_find_ppmu_id(edev);
174 pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
175 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
176 __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
178 /* Read cycle count */
179 edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);
181 /* Read performance count */
187 = __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
191 ((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
192 | __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
198 /* Disable specific counter */
199 cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
200 cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
201 __raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);
203 dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
204 edata->load_count, edata->total_count);
209 static const struct devfreq_event_ops exynos_ppmu_ops = {
210 .disable = exynos_ppmu_disable,
211 .set_event = exynos_ppmu_set_event,
212 .get_event = exynos_ppmu_get_event,
/*
 * The devfreq-event ops structure for PPMU v2.0
 */
218 static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
220 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
223 /* Disable all counters */
224 clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
225 | PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
227 __raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
228 __raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
229 __raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
230 __raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);
232 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
233 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
234 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
235 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
236 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
237 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
238 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
239 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
240 __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
241 __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
242 __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
243 __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
244 __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
245 __raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);
248 pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
249 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
250 __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
255 static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
257 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
258 int id = exynos_ppmu_find_ppmu_id(edev);
261 /* Enable all counters */
262 cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
263 cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
264 __raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);
266 /* Set the event of Read/Write data count */
271 __raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
272 info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
275 __raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
276 info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
280 /* Reset cycle counter/performance counter and enable PPMU */
281 pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
282 pmnc &= ~(PPMU_PMNC_ENABLE_MASK
283 | PPMU_PMNC_COUNTER_RESET_MASK
284 | PPMU_PMNC_CC_RESET_MASK
285 | PPMU_PMNC_CC_DIVIDER_MASK
286 | PPMU_V2_PMNC_START_MODE_MASK);
287 pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
288 pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
289 pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
290 pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
291 __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
296 static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
297 struct devfreq_event_data *edata)
299 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
300 int id = exynos_ppmu_find_ppmu_id(edev);
302 u32 pmcnt_high, pmcnt_low;
306 pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
307 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
308 __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
310 /* Read cycle count and performance count */
311 edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);
317 load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
320 pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
321 pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
322 load_count = (u64)((pmcnt_high & 0xff) << 32) + (u64)pmcnt_low;
325 edata->load_count = load_count;
327 /* Disable all counters */
328 cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
329 cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
330 __raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);
332 dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
333 edata->load_count, edata->total_count);
337 static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
338 .disable = exynos_ppmu_v2_disable,
339 .set_event = exynos_ppmu_v2_set_event,
340 .get_event = exynos_ppmu_v2_get_event,
343 static const struct of_device_id exynos_ppmu_id_match[] = {
345 .compatible = "samsung,exynos-ppmu",
346 .data = (void *)&exynos_ppmu_ops,
348 .compatible = "samsung,exynos-ppmu-v2",
349 .data = (void *)&exynos_ppmu_v2_ops,
354 static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
356 const struct of_device_id *match;
358 match = of_match_node(exynos_ppmu_id_match, np);
359 return (struct devfreq_event_ops *)match->data;
362 static int of_get_devfreq_events(struct device_node *np,
363 struct exynos_ppmu *info)
365 struct devfreq_event_desc *desc;
366 struct devfreq_event_ops *event_ops;
367 struct device *dev = info->dev;
368 struct device_node *events_np, *node;
371 events_np = of_get_child_by_name(np, "events");
374 "failed to get child node of devfreq-event devices\n");
377 event_ops = exynos_bus_get_ops(np);
379 count = of_get_child_count(events_np);
380 desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
383 info->num_events = count;
386 for_each_child_of_node(events_np, node) {
387 for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
388 if (!ppmu_events[i].name)
391 if (!of_node_cmp(node->name, ppmu_events[i].name))
395 if (i == ARRAY_SIZE(ppmu_events)) {
397 "don't know how to configure events : %s\n",
402 desc[j].ops = event_ops;
403 desc[j].driver_data = info;
405 of_property_read_string(node, "event-name", &desc[j].name);
413 of_node_put(events_np);
418 static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
420 struct device *dev = info->dev;
421 struct device_node *np = dev->of_node;
425 dev_err(dev, "failed to find devicetree node\n");
429 /* Maps the memory mapped IO to control PPMU register */
430 info->ppmu.base = of_iomap(np, 0);
431 if (IS_ERR_OR_NULL(info->ppmu.base)) {
432 dev_err(dev, "failed to map memory region\n");
436 info->ppmu.clk = devm_clk_get(dev, "ppmu");
437 if (IS_ERR(info->ppmu.clk)) {
438 info->ppmu.clk = NULL;
439 dev_warn(dev, "cannot get PPMU clock\n");
442 ret = of_get_devfreq_events(np, info);
444 dev_err(dev, "failed to parse exynos ppmu dt node\n");
451 iounmap(info->ppmu.base);
456 static int exynos_ppmu_probe(struct platform_device *pdev)
458 struct exynos_ppmu *info;
459 struct devfreq_event_dev **edev;
460 struct devfreq_event_desc *desc;
461 int i, ret = 0, size;
463 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
467 mutex_init(&info->lock);
468 info->dev = &pdev->dev;
470 /* Parse dt data to get resource */
471 ret = exynos_ppmu_parse_dt(info);
474 "failed to parse devicetree for resource\n");
479 size = sizeof(struct devfreq_event_dev *) * info->num_events;
480 info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
483 "failed to allocate memory devfreq-event devices\n");
487 platform_set_drvdata(pdev, info);
489 for (i = 0; i < info->num_events; i++) {
490 edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
491 if (IS_ERR(edev[i])) {
492 ret = PTR_ERR(edev[i]);
494 "failed to add devfreq-event device\n");
499 clk_prepare_enable(info->ppmu.clk);
503 iounmap(info->ppmu.base);
508 static int exynos_ppmu_remove(struct platform_device *pdev)
510 struct exynos_ppmu *info = platform_get_drvdata(pdev);
512 clk_disable_unprepare(info->ppmu.clk);
513 iounmap(info->ppmu.base);
518 static struct platform_driver exynos_ppmu_driver = {
519 .probe = exynos_ppmu_probe,
520 .remove = exynos_ppmu_remove,
522 .name = "exynos-ppmu",
523 .of_match_table = exynos_ppmu_id_match,
526 module_platform_driver(exynos_ppmu_driver);
528 MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
529 MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
530 MODULE_LICENSE("GPL");