/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

/*
 * Implementation of mpipe gxio calls.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/string.h>

#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_mpipe.h>
#include <gxio/iorpc_mpipe_info.h>
#include <gxio/kiorpc.h>
#include <gxio/mpipe.h>
/* HACK: Avoid pointless "shadow" warnings. */
#define link link_shadow
/**
 * strscpy - Copy a C-string into a sized buffer, but only if it fits
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 * @size: size of destination buffer
 *
 * Use this routine to avoid copying too-long strings.
 * The routine returns the total number of bytes copied
 * (including the trailing NUL) or zero if the buffer wasn't
 * big enough.  To ensure that programmers pay attention
 * to the return code, the destination has a single NUL
 * written at the front (if size is non-zero) when the
 * buffer is not big enough.
 */
static size_t strscpy(char *dest, const char *src, size_t size)
{
	/* len includes the trailing NUL (strnlen never exceeds size). */
	size_t len = strnlen(src, size) + 1;
	if (len > size) {
		/* Doesn't fit: poison the front so misuse is visible. */
		if (size)
			dest[0] = '\0';
		return 0;
	}
	memcpy(dest, src, len);
	return len;
}
58 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
65 if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
68 snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
69 fd = hv_dev_open((HV_VirtAddr) file, 0);
74 if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
80 /* Map in the MMIO space. */
81 context->mmio_cfg_base = (void __force *)
82 iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
83 HV_MPIPE_CONFIG_MMIO_SIZE);
84 if (context->mmio_cfg_base == NULL)
87 context->mmio_fast_base = (void __force *)
88 iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET,
89 HV_MPIPE_FAST_MMIO_SIZE);
90 if (context->mmio_fast_base == NULL)
93 /* Initialize the stacks. */
94 for (i = 0; i < 8; i++)
95 context->__stacks.stacks[i] = 255;
97 context->instance = mpipe_index;
102 iounmap((void __force __iomem *)(context->mmio_cfg_base));
104 hv_dev_close(context->fd);
109 EXPORT_SYMBOL_GPL(gxio_mpipe_init);
111 int gxio_mpipe_destroy(gxio_mpipe_context_t *context)
113 iounmap((void __force __iomem *)(context->mmio_cfg_base));
114 iounmap((void __force __iomem *)(context->mmio_fast_base));
115 return hv_dev_close(context->fd);
118 EXPORT_SYMBOL_GPL(gxio_mpipe_destroy);
/* Actual buffer sizes (in bytes) for each gxio_mpipe_buffer_size_enum_t. */
static int16_t gxio_mpipe_buffer_sizes[8] =
	{ 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };
123 gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
127 for (i = 0; i < 7; i++)
128 if (size <= gxio_mpipe_buffer_sizes[i])
133 EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum);
135 size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
138 if (buffer_size_enum > 7)
139 buffer_size_enum = 7;
141 return gxio_mpipe_buffer_sizes[buffer_size_enum];
144 EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size);
146 size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
148 const int BUFFERS_PER_LINE = 12;
150 /* Count the number of cachlines. */
151 unsigned long lines =
152 (buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;
154 /* Convert to bytes. */
155 return lines * CHIP_L2_LINE_SIZE();
158 EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);
160 int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
162 gxio_mpipe_buffer_size_enum_t
163 buffer_size_enum, void *mem, size_t mem_size,
164 unsigned int mem_flags)
168 memset(mem, 0, mem_size);
170 result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size,
176 /* Save the stack. */
177 context->__stacks.stacks[buffer_size_enum] = stack;
182 EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack);
184 int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
186 void *mem, size_t mem_size,
187 unsigned int mem_flags)
189 return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
193 EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);
195 int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context,
198 unsigned int num_rings,
200 unsigned int num_buckets,
201 gxio_mpipe_bucket_mode_t mode)
206 gxio_mpipe_bucket_info_t bucket_info = { {
212 gxio_mpipe_notif_group_bits_t bits = { {0} };
214 for (i = 0; i < num_rings; i++)
215 gxio_mpipe_notif_group_add_ring(&bits, ring + i);
217 result = gxio_mpipe_init_notif_group(context, group, bits);
221 for (i = 0; i < num_buckets; i++) {
222 bucket_info.notifring = ring + (i % num_rings);
224 result = gxio_mpipe_init_bucket(context, bucket + i,
233 EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets);
235 int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
236 unsigned int ring, unsigned int channel,
237 void *mem, size_t mem_size,
238 unsigned int mem_flags)
240 memset(mem, 0, mem_size);
242 return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
246 EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);
248 void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
249 gxio_mpipe_context_t *context)
251 rules->context = context;
252 memset(&rules->list, 0, sizeof(rules->list));
255 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);
257 int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
258 unsigned int bucket, unsigned int num_buckets,
259 gxio_mpipe_rules_stacks_t *stacks)
264 gxio_mpipe_rules_list_t *list = &rules->list;
267 gxio_mpipe_rules_rule_t *rule =
268 (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
270 unsigned int head = list->tail;
273 * Align next rule properly.
274 *Note that "dmacs_and_vlans" will also be aligned.
276 unsigned int pad = 0;
277 while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
282 * ISSUE: Mark rules as broken on error?
284 if (head + pad + sizeof(*rule) >= sizeof(list->rules))
285 return GXIO_MPIPE_ERR_RULES_FULL;
287 /* Verify num_buckets is a power of 2. */
288 if (__builtin_popcount(num_buckets) != 1)
289 return GXIO_MPIPE_ERR_RULES_INVALID;
291 /* Add padding to previous rule. */
294 /* Start a new rule. */
295 list->head = head + pad;
297 rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
299 /* Default some values. */
302 rule->capacity = 16384;
304 /* Save the bucket info. */
305 rule->bucket_mask = num_buckets - 1;
306 rule->bucket_first = bucket;
308 for (i = 8 - 1; i >= 0; i--) {
310 stacks ? stacks->stacks[i] : rules->context->__stacks.
314 rule->stacks.stacks[i] = stack;
318 return GXIO_MPIPE_ERR_RULES_INVALID;
320 /* NOTE: Only entries at the end of the array can be 255. */
321 for (i = 8 - 1; i > 0; i--) {
322 if (rule->stacks.stacks[i] == 255) {
323 rule->stacks.stacks[i] = stack;
325 gxio_mpipe_buffer_size_enum_to_buffer_size(i -
330 rule->size = sizeof(*rule);
331 list->tail = list->head + rule->size;
336 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);
338 int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
339 unsigned int channel)
341 gxio_mpipe_rules_list_t *list = &rules->list;
343 gxio_mpipe_rules_rule_t *rule =
344 (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
346 /* Verify channel. */
348 return GXIO_MPIPE_ERR_RULES_INVALID;
352 return GXIO_MPIPE_ERR_RULES_EMPTY;
354 rule->channel_bits |= (1UL << channel);
359 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel);
361 int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom)
363 gxio_mpipe_rules_list_t *list = &rules->list;
365 gxio_mpipe_rules_rule_t *rule =
366 (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
370 return GXIO_MPIPE_ERR_RULES_EMPTY;
372 rule->headroom = headroom;
377 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom);
379 int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
381 gxio_mpipe_rules_list_t *list = &rules->list;
383 offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
384 return gxio_mpipe_commit_rules(rules->context, list, size);
387 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);
389 int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
390 gxio_mpipe_context_t *context,
392 void *mem, size_t mem_size, unsigned int mem_flags)
394 /* The init call below will verify that "mem_size" is legal. */
395 unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);
397 iqueue->context = context;
398 iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
400 iqueue->num_entries = num_entries;
401 iqueue->mask_num_entries = num_entries - 1;
402 iqueue->log2_num_entries = __builtin_ctz(num_entries);
404 #ifdef __BIG_ENDIAN__
408 /* Initialize the "tail". */
409 __gxio_mmio_write(mem, iqueue->head);
411 return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
415 EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
417 int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
418 gxio_mpipe_context_t *context,
420 unsigned int channel,
421 void *mem, unsigned int mem_size,
422 unsigned int mem_flags)
424 /* The init call below will verify that "mem_size" is legal. */
425 unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);
427 /* Offset used to read number of completed commands. */
428 MPIPE_EDMA_POST_REGION_ADDR_t offset;
430 int result = gxio_mpipe_init_edma_ring(context, ering, channel,
431 mem, mem_size, mem_flags);
435 memset(equeue, 0, sizeof(*equeue));
439 MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
440 MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
443 __gxio_dma_queue_init(&equeue->dma_queue,
444 context->mmio_fast_base + offset.word,
446 equeue->edescs = mem;
447 equeue->mask_num_entries = num_entries - 1;
448 equeue->log2_num_entries = __builtin_ctz(num_entries);
449 equeue->context = context;
450 equeue->ering = ering;
451 equeue->channel = channel;
456 EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
458 int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
459 const struct timespec64 *ts)
461 cycles_t cycles = get_cycles();
462 return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
463 (uint64_t)ts->tv_nsec,
466 EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
468 int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
469 struct timespec64 *ts)
472 cycles_t cycles_prev, cycles_now, clock_rate;
473 cycles_prev = get_cycles();
474 ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
475 (uint64_t *)&ts->tv_nsec,
476 (uint64_t *)&cycles_now);
481 clock_rate = get_clock_rate();
482 ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
483 if (ts->tv_nsec < 0) {
484 ts->tv_nsec += 1000000000LL;
489 EXPORT_SYMBOL_GPL(gxio_mpipe_get_timestamp);
491 int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
493 return gxio_mpipe_adjust_timestamp_aux(context, delta);
495 EXPORT_SYMBOL_GPL(gxio_mpipe_adjust_timestamp);
497 /* Get our internal context used for link name access. This context is
498 * special in that it is not associated with an mPIPE service domain.
500 static gxio_mpipe_context_t *_gxio_get_link_context(void)
502 static gxio_mpipe_context_t context;
503 static gxio_mpipe_context_t *contextp;
504 static int tried_open = 0;
505 static DEFINE_MUTEX(mutex);
514 * "4" here is the maximum possible number of mPIPE shims; it's
515 * an exaggeration but we shouldn't ever go beyond 2 anyway.
517 for (i = 0; i < 4; i++) {
520 snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
521 context.fd = hv_dev_open((HV_VirtAddr) file, 0);
530 mutex_unlock(&mutex);
535 int gxio_mpipe_link_instance(const char *link_name)
537 _gxio_mpipe_link_name_t name;
538 gxio_mpipe_context_t *context = _gxio_get_link_context();
541 return GXIO_ERR_NO_DEVICE;
543 if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
544 return GXIO_ERR_NO_DEVICE;
546 return gxio_mpipe_info_instance_aux(context, name);
548 EXPORT_SYMBOL_GPL(gxio_mpipe_link_instance);
550 int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
553 _gxio_mpipe_link_name_t name;
554 _gxio_mpipe_link_mac_t mac;
556 gxio_mpipe_context_t *context = _gxio_get_link_context();
558 return GXIO_ERR_NO_DEVICE;
560 rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
562 if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
563 return GXIO_ERR_INVAL_MEMORY_SIZE;
564 memcpy(link_mac, mac.mac, sizeof(mac.mac));
570 EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);
572 int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
573 gxio_mpipe_context_t *context, const char *link_name,
576 _gxio_mpipe_link_name_t name;
579 if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
580 return GXIO_ERR_NO_DEVICE;
582 rv = gxio_mpipe_link_open_aux(context, name, flags);
586 link->context = context;
587 link->channel = rv >> 8;
588 link->mac = rv & 0xFF;
593 EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);
595 int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
597 return gxio_mpipe_link_close_aux(link->context, link->mac);
600 EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
602 int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
605 return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
609 EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);