Skip to content

Commit baa0db4

Browse files
committed
accel/amdxdna: Add KVM/QEMU support for expandable device heap
Add support for device heap expansion when running under KVM/QEMU where
heap chunks are imported via dma-buf rather than allocated as shmem
objects.

- amdxdna_gem_dev_obj_vmap: Handle imported heap chunks that have an
  sgt (scatter-gather table) but no pages array. Use
  for_each_sgtable_page() to gather pages from the chunk's sgt when
  chunk->base.pages is NULL. Add error logging for chunks with neither
  pages nor sgt, and for page-count mismatches.

- amdxdna_gem_vmap: Include the BO type in error messages for easier
  debugging.

- amdxdna_drm_expand_dev_heap: Pre-set the expansion chunk's UVA to the
  heap base address plus the current total heap size, so GET_BO_INFO
  returns the expected contiguous address for the shim/vxdna to
  MAP_FIXED mmap at.

Signed-off-by: Wendy Liang <wendy.liang@amd.com>
1 parent 52b6cd5 commit baa0db4

1 file changed

Lines changed: 38 additions & 6 deletions

File tree

drivers/accel/amdxdna/amdxdna_gem.c

Lines changed: 38 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,9 @@ void *amdxdna_gem_vmap(struct amdxdna_gem_obj *abo)
158158
if (!abo->mem.kva) {
159159
ret = drm_gem_vmap(to_gobj(abo), &map);
160160
if (ret)
161-
XDNA_ERR(abo->client->xdna, "Vmap bo failed, ret %d", ret);
161+
XDNA_ERR(abo->client->xdna,
162+
"Vmap bo failed, ret %d, type %d",
163+
ret, abo->type);
162164
else
163165
abo->mem.kva = map.vaddr;
164166
}
@@ -654,7 +656,8 @@ static int amdxdna_gem_dev_obj_vmap(struct drm_gem_object *obj, struct iosys_map
654656
{
655657
struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
656658
struct amdxdna_client *client = abo->client;
657-
u64 dev_base = client->xdna->dev_info->dev_mem_base;
659+
struct amdxdna_dev *xdna = client->xdna;
660+
u64 dev_base = xdna->dev_info->dev_mem_base;
658661
u64 bo_start = abo->mm_node.start - dev_base;
659662
u64 bo_end = bo_start + abo->mm_node.size;
660663
unsigned long nr_pages = abo->mm_node.size >> PAGE_SHIFT;
@@ -683,20 +686,37 @@ static int amdxdna_gem_dev_obj_vmap(struct drm_gem_object *obj, struct iosys_map
683686
first_pg = overlap_start >> PAGE_SHIFT;
684687
last_pg = overlap_end >> PAGE_SHIFT;
685688

686-
if (!chunk->base.pages) {
689+
if (chunk->base.pages) {
690+
for (i = first_pg; i < last_pg; i++)
691+
pages[pg_idx++] = chunk->base.pages[i];
692+
} else if (chunk->base.sgt) {
693+
struct sg_page_iter piter;
694+
unsigned long cnt = 0;
695+
696+
for_each_sgtable_page(chunk->base.sgt,
697+
&piter, first_pg) {
698+
if (cnt >= last_pg - first_pg)
699+
break;
700+
pages[pg_idx++] =
701+
sg_page_iter_page(&piter);
702+
cnt++;
703+
}
704+
} else {
705+
XDNA_ERR(xdna,
706+
"Heap chunk (offset 0x%llx size 0x%lx) no backing pages",
707+
chunk_start, chunk->mem.size);
687708
mutex_unlock(&client->mm_lock);
688709
kvfree(pages);
689710
return -EINVAL;
690711
}
691-
692-
for (i = first_pg; i < last_pg; i++)
693-
pages[pg_idx++] = chunk->base.pages[i];
694712
}
695713
chunk_start = chunk_end;
696714
}
697715
mutex_unlock(&client->mm_lock);
698716

699717
if (pg_idx != nr_pages) {
718+
XDNA_ERR(xdna, "DEV BO vmap gathered %d pages, expected %lu (bo 0x%llx-0x%llx)",
719+
pg_idx, nr_pages, bo_start, bo_end);
700720
kvfree(pages);
701721
return -EINVAL;
702722
}
@@ -901,6 +921,18 @@ amdxdna_drm_expand_dev_heap(struct amdxdna_client *client,
901921
}
902922
}
903923

924+
/*
925+
* Pre-set the expected UVA so that GET_BO_INFO returns the address
926+
* where this chunk must be mmapped for contiguous heap layout.
927+
* The mmap callback will update this to the actual mmap address.
928+
*/
929+
if (amdxdna_pasid_on(client)) {
930+
u64 heap_uva = amdxdna_gem_uva(client->dev_heap);
931+
932+
if (heap_uva != AMDXDNA_INVALID_ADDR)
933+
abo->mem.uva = heap_uva + client->total_heap_size;
934+
}
935+
904936
drm_gem_object_get(to_gobj(abo));
905937
list_add_tail(&abo->heap_chunk_node, &client->dev_heap_chunks);
906938
client->total_heap_size = new_total;

0 commit comments

Comments (0)