/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"
#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15
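
/*
 * With these defaults a stuck waiter in host1x_syncpt_wait() below is
 * re-checked roughly every two seconds and warned about for up to
 * MAX_STUCK_CHECK_COUNT intervals (about 30 seconds) before a full debug
 * dump is taken.
 */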
static struct host1x_syncpt_base *
host1x_syncpt_base_request(struct host1x *host)
{
	struct host1x_syncpt_base *bases = host->bases;
	unsigned int i;

	for (i = 0; i < host->info->nb_bases; i++)
		if (!bases[i].requested)
			break;

	if (i >= host->info->nb_bases)
		return NULL;

	bases[i].requested = true;

	return &bases[i];
}

static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
{
	if (base)
		base->requested = false;
}
static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
						 struct device *dev,
						 unsigned long flags)
{
	struct host1x_syncpt *sp = host->syncpt;
	unsigned int i;
	char *name;

	mutex_lock(&host->syncpt_mutex);

	/* find the first unused (unnamed) syncpoint */
	for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
		;

	if (i >= host->info->nb_pts)
		goto unlock;

	if (flags & HOST1X_SYNCPT_HAS_BASE) {
		sp->base = host1x_syncpt_base_request(host);
		if (!sp->base)
			goto unlock;
	}

	name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
			 dev ? dev_name(dev) : NULL);
	if (!name)
		goto free_base;

	sp->dev = dev;
	sp->name = name;

	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
		sp->client_managed = true;
	else
		sp->client_managed = false;

	mutex_unlock(&host->syncpt_mutex);
	return sp;

free_base:
	host1x_syncpt_base_free(sp->base);
	sp->base = NULL;
unlock:
	mutex_unlock(&host->syncpt_mutex);
	return NULL;
}
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}
EXPORT_SYMBOL(host1x_syncpt_id);
/*
 * Updates the value sent to hardware.
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_incr_max);
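
/*
 * Illustrative sketch (not part of the driver): a submission path that will
 * perform "incrs" syncpoint increments for a job typically reserves them up
 * front with host1x_syncpt_incr_max() and keeps the returned value as the
 * fence threshold that marks the job's completion. The helper below is
 * hypothetical.
 *
 *	static u32 example_job_fence(struct host1x_syncpt *sp, u32 incrs)
 *	{
 *		return host1x_syncpt_incr_max(sp, incrs);
 *	}
 */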
/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
		host1x_hw_syncpt_restore(host, sp_base + i);

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_restore_wait_base(host, sp_base + i);

	wmb();
}
/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		if (host1x_syncpt_client_managed(sp_base + i))
			host1x_hw_syncpt_load(host, sp_base + i);
		else
			WARN_ON(!host1x_syncpt_idle(sp_base + i));
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}
/*
 * Updates the cached syncpoint value by reading a new value from the hardware
 * register.
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
	u32 val;

	val = host1x_hw_syncpt_load(sp->host, sp);
	trace_host1x_syncpt_load_min(sp->id, val);

	return val;
}
/*
 * Get the current syncpoint base.
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
	host1x_hw_syncpt_load_wait_base(sp->host, sp);

	return sp->base_val;
}
/*
 * Increment syncpoint value from the CPU, updating the cache.
 */
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
EXPORT_SYMBOL(host1x_syncpt_incr);
/*
 * Updates the sync point from hardware, and returns true if the syncpoint is
 * expired, false if we may need to wait.
 */
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	host1x_hw_syncpt_load(sp->host, sp);

	return host1x_syncpt_is_expired(sp, thresh);
}
/*
 * Main entrypoint for syncpoint value waits.
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	struct host1x_waitlist *waiter;
	int err = 0, check_count = 0;
	u32 val;

	/* first check cache */
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = host1x_syncpt_load(sp);

		return 0;
	}

	/* try to read from register */
	val = host1x_hw_syncpt_load(sp->host, sp);
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = val;

		goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* allocate a waiter */
	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = host1x_intr_add_action(sp->host, sp->id, thresh,
				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
				     &wq, waiter, &ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < 0)
		timeout = LONG_MAX;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
		int remain;

		remain = wait_event_interruptible_timeout(wq,
				syncpt_load_min_is_expired(sp, thresh),
				check);
		if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
			if (value)
				*value = host1x_syncpt_load(sp);

			err = 0;
			break;
		}

		if (remain < 0) {
			err = remain;
			break;
		}

		timeout -= check;

		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(sp->host->dev,
				 "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
				 current->comm, sp->id, sp->name,
				 thresh, timeout);

			host1x_debug_dump_syncpts(sp->host);

			if (check_count == MAX_STUCK_CHECK_COUNT)
				host1x_debug_dump(sp->host);

			check_count++;
		}
	}

	host1x_intr_put_ref(sp->host, sp->id, ref);

done:
	return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
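
/*
 * Illustrative sketch (not part of the driver): a caller holding a fence
 * threshold, e.g. the value returned by host1x_syncpt_incr_max() at submit
 * time, would wait for it roughly like this. The helper and the 5 second
 * timeout are hypothetical.
 *
 *	static int example_wait_for_fence(struct host1x_syncpt *sp, u32 fence)
 *	{
 *		u32 value;
 *
 *		return host1x_syncpt_wait(sp, fence,
 *					  msecs_to_jiffies(5000), &value);
 *	}
 *
 * A zero timeout only polls the syncpoint (-EAGAIN if it has not expired), a
 * negative timeout waits indefinitely, and on success the current syncpoint
 * value is returned through the last argument.
 */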
/*
 * Returns true if the syncpoint is expired, false if we may need to wait.
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;
	u32 future_val;

	smp_rmb();

	current_val = (u32)atomic_read(&sp->min_val);
	future_val = (u32)atomic_read(&sp->max_val);
	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val = the current value of the syncpoint.
	 * t = thresh = the value we are checking
	 * f = future_val = max_val = the value c will reach when all
	 *     outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 * Consider all cases:
	 *
	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
	 *	B) .....c.....f..t..	Dtf > Dtc	expired
	 *	C) ..t..c.....f.....	Dtf > Dtc	expired (Dct very large)
	 *
	 * Any case where f==c: always expired (for any t).	Dtf == Dtc
	 * Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
	 * Any case where t==f!=c: always wait.			Dtf < Dtc (because Dtf==0,
	 *							Dtc!=0)
	 *
	 * Other cases:
	 *
	 *	D) .....t..f..c.....	Dtf < Dtc	need to wait
	 *	E) .....f..c..t.....	Dtf < Dtc	need to wait
	 *	F) .....f..t..c.....	Dtf > Dtc	expired
	 *
	 * So:
	 *	Dtf >= Dtc implies EXPIRED	(return true)
	 *	Dtf <  Dtc implies WAIT		(return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * If the future value is zero, we have a client-managed sync point. In
	 * that case we do a direct comparison.
	 */
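	/* A concrete example of the wraparound case (illustrative values):
	 * with current_val c = 0xfffffff0, thresh t = 0x00000005 and
	 * future_val f = 0x00000010, unsigned arithmetic gives
	 * Dtc = c - t = 0xffffffeb and Dtf = f - t = 0x0000000b, so
	 * Dtf < Dtc and the caller still needs to wait, even though a plain
	 * thresh <= current_val comparison would wrongly report expiry.
	 */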
	if (!host1x_syncpt_client_managed(sp))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
/* remove a wait pointed to by patch_addr */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
	return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
}
int host1x_syncpt_init(struct host1x *host)
{
	struct host1x_syncpt_base *bases;
	struct host1x_syncpt *syncpt;
	unsigned int i;

	syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
			      GFP_KERNEL);
	if (!syncpt)
		return -ENOMEM;

	bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
			     GFP_KERNEL);
	if (!bases)
		return -ENOMEM;

	for (i = 0; i < host->info->nb_pts; i++) {
		syncpt[i].id = i;
		syncpt[i].host = host;
	}

	for (i = 0; i < host->info->nb_bases; i++)
		bases[i].id = i;

	mutex_init(&host->syncpt_mutex);
	host->syncpt = syncpt;
	host->bases = bases;

	host1x_syncpt_restore(host);

	/* Allocate sync point to use for clearing waits for expired fences */
	host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
	if (!host->nop_sp)
		return -ENOMEM;

	return 0;
}
struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
					    unsigned long flags)
{
	struct host1x *host = dev_get_drvdata(dev->parent);

	return host1x_syncpt_alloc(host, dev, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);
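
/*
 * Illustrative sketch (not part of the driver): a host1x client driver
 * typically requests a syncpoint when it is initialized and frees it again
 * on teardown. The client pointer, flag choice and error handling below are
 * hypothetical.
 *
 *	struct host1x_syncpt *syncpt;
 *
 *	syncpt = host1x_syncpt_request(client->dev, HOST1X_SYNCPT_HAS_BASE);
 *	if (!syncpt)
 *		return -ENOMEM;
 *	...
 *	host1x_syncpt_free(syncpt);
 */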
void host1x_syncpt_free(struct host1x_syncpt *sp)
{
	if (!sp)
		return;

	mutex_lock(&sp->host->syncpt_mutex);

	host1x_syncpt_base_free(sp->base);
	kfree(sp->name);
	sp->base = NULL;
	sp->dev = NULL;
	sp->name = NULL;
	sp->client_managed = false;

	mutex_unlock(&sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_free);
void host1x_syncpt_deinit(struct host1x *host)
{
	struct host1x_syncpt *sp = host->syncpt;
	unsigned int i;

	for (i = 0; i < host->info->nb_pts; i++, sp++)
		kfree(sp->name);
}
/*
 * Read max. It indicates how many operations there are in the queue, either
 * in the channel or in a software thread.
 */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);
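
/*
 * Illustrative sketch (not part of the driver): because min chases max, the
 * number of increments still outstanding can be derived from the two cached
 * values. The helper below is hypothetical.
 *
 *	static u32 example_outstanding_incrs(struct host1x_syncpt *sp)
 *	{
 *		return host1x_syncpt_read_max(sp) - host1x_syncpt_read_min(sp);
 *	}
 */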
/*
 * Read min, which is a shadow of the current sync point value in hardware.
 */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);
u32 host1x_syncpt_read(struct host1x_syncpt *sp)
{
	return host1x_syncpt_load(sp);
}
EXPORT_SYMBOL(host1x_syncpt_read);
unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}

unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}

unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}
struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
{
	if (id >= host->info->nb_pts)
		return NULL;

	return host->syncpt + id;
}
EXPORT_SYMBOL(host1x_syncpt_get);
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
	return sp ? sp->base : NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_base);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
	return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);