/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. Splicing each
	 * bucket at the head of out_list puts higher-priority buckets in
	 * front of lower-priority ones, yielding the descending order.
	 */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
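
/* Walk the userspace relocation chunk: look up each GEM handle, work out
 * the allowed/preferred placement domains and a scheduling priority for
 * every buffer, and build the sorted p->validated list that is later
 * reserved and validated by TTM.
 */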
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* The first reloc of an UVD job is the msg and that must be in
		 * VRAM. Also put everything into VRAM on AGP cards and older
		 * IGP chips to avoid image corruptions.
		 */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || pci_find_capability(p->rdev->ddev->pdev,
						   PCI_CAP_ID_AGP) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			/* If userspace asked for VRAM only, still allow
			 * falling back to GTT. */
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;

			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		/* Objects shared as dma-bufs cannot be moved to VRAM */
		if (p->relocs[i].robj->prime_shared_count) {
			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
			if (!p->relocs[i].allowed_domains) {
				DRM_ERROR("BO associated with dma-buf cannot "
					  "be moved to VRAM\n");
				return -EINVAL;
			}
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = !r->write_domain;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}
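
/* Translate the ring id supplied by userspace in the FLAGS chunk into a
 * hardware ring index, taking the chip family and the requested priority
 * into account.
 */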
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}
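
/* Collect the fences from the reservation object of every validated BO so
 * that the IB waits for all prior work touching those buffers, including
 * work submitted on other rings.
 */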
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *reloc;
	int r;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct reservation_object *resv;

		resv = reloc->robj->tbo.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     reloc->tv.shared);
		if (r)
			return r;
	}
	return 0;
}
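
/* Copy the chunk table and the chunks themselves from userspace and locate
 * the IB, CONST_IB, RELOCS and FLAGS chunks. The FLAGS chunk, when present,
 * is laid out as: dword 0 = cs_flags, dword 1 = ring id (optional),
 * dword 2 = ring priority (optional).
 */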
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	INIT_LIST_HEAD(&p->validated);

	if (!cs->num_chunks) {
		return 0;
	}

	/* get chunks */
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib = NULL;
	p->chunk_relocs = NULL;
	p->chunk_flags = NULL;
	p->chunk_const_ib = NULL;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs = &p->chunks[i];
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib = &p->chunks[i];
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib = &p->chunks[i];
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags = &p->chunks[i];
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			struct radeon_bo *bo = parser->relocs[i].robj;
			if (bo == NULL)
				continue;

			drm_gem_object_unreference_unlocked(&bo->gem_base);
		}
	}
	kfree(parser->track);
	drm_free_large(parser->relocs);
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}
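
/* Submission path for rings that do not use a VM: run the per-ring command
 * stream checker on the copied IB, sync with other rings and schedule it.
 */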
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}
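
/* Bring the VM page tables up to date for this submission: update the page
 * directory, the mapping of the temporary ring BO, and the mapping of every
 * BO referenced by the CS, then make the IB wait for the page-table updates.
 */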
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;

		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
	}

	return radeon_vm_clear_invalids(rdev, vm);
}
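
/* Submission path for VM rings: parse the IBs, update the page tables under
 * the VM mutex, sync with other rings and schedule the IB (together with the
 * const IB on SI+ parts).
 */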
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r)
		goto out;

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib != NULL)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}
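
/* Translate a GPU lockup (-EDEADLK) from the submission paths into a GPU
 * reset followed by -EAGAIN, so that userspace retries the submission.
 */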
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
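
/* Allocate the IB (and on SI+ the const IB) and copy the command stream from
 * the userspace chunks into it. On AGP the IB chunk was already copied to
 * kdata by radeon_cs_parser_init, so it can be memcpy'd from there.
 */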
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib != NULL)) {
			ib_chunk = parser->chunk_const_ib;
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = parser->chunk_ib;
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = parser->chunk_ib;

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}
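
/* Main CS ioctl entry point: initialize the parser, copy in the IB, validate
 * the relocations and hand the stream to the non-VM or VM submission path.
 */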
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r)
		goto out;
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the IB
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the packet
 * is bigger than the remaining IB size or if the packet type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	/* A packet occupies pkt->count + 2 dwords: the header plus
	 * count + 1 payload dwords. */
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	holds the resulting relocation list entry
 * @nomm:	no memory management (legacy UMS path)
 *
 * Check if the next packet is a relocation packet3, and if so set *cs_reloc
 * to the corresponding entry in the parser's relocation list.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = p->chunk_relocs;
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = &p->relocs[(idx / 4)];
	return 0;
}