Skip to content

Commit e0d833e

Browse files
committed
accel/amdxdna: Add KVM/QEMU support for expandable device heap
Add support for device heap expansion when running under KVM/QEMU, where heap chunks are imported via dma-buf rather than allocated as shmem objects. - amdxdna_gem_dev_obj_vmap: Handle imported heap chunks that have an sgt (scatter-gather table) but no pages array. Use for_each_sgtable_page() to gather pages from the chunk's sgt when chunk->base.pages is NULL. Add error logging for chunks that have neither pages nor an sgt, and for page-count mismatches. - amdxdna_gem_vmap: Include the BO type in error messages for easier debugging. - amdxdna_drm_expand_dev_heap: Pre-set the expansion chunk's UVA to the heap base address plus the current total heap size, so that GET_BO_INFO returns the contiguous address the shim/vxdna expects when it mmaps the chunk with MAP_FIXED. Signed-off-by: Wendy Liang <wendy.liang@amd.com>
1 parent ba87455 commit e0d833e

File tree

1 file changed

+38
-6
lines changed

1 file changed

+38
-6
lines changed

drivers/accel/amdxdna/amdxdna_gem.c

Lines changed: 38 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -159,7 +159,9 @@ void *amdxdna_gem_vmap(struct amdxdna_gem_obj *abo)
159159
if (!abo->mem.kva) {
160160
ret = drm_gem_vmap(to_gobj(abo), &map);
161161
if (ret)
162-
XDNA_ERR(abo->client->xdna, "Vmap bo failed, ret %d", ret);
162+
XDNA_ERR(abo->client->xdna,
163+
"Vmap bo failed, ret %d, type %d",
164+
ret, abo->type);
163165
else
164166
abo->mem.kva = map.vaddr;
165167
}
@@ -673,7 +675,8 @@ static int amdxdna_gem_dev_obj_vmap(struct drm_gem_object *obj, struct iosys_map
673675
{
674676
struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
675677
struct amdxdna_client *client = abo->client;
676-
u64 dev_base = client->xdna->dev_info->dev_mem_base;
678+
struct amdxdna_dev *xdna = client->xdna;
679+
u64 dev_base = xdna->dev_info->dev_mem_base;
677680
u64 bo_start = abo->mm_node.start - dev_base;
678681
u64 bo_end = bo_start + abo->mm_node.size;
679682
unsigned long nr_pages = abo->mm_node.size >> PAGE_SHIFT;
@@ -702,20 +705,37 @@ static int amdxdna_gem_dev_obj_vmap(struct drm_gem_object *obj, struct iosys_map
702705
first_pg = overlap_start >> PAGE_SHIFT;
703706
last_pg = overlap_end >> PAGE_SHIFT;
704707

705-
if (!chunk->base.pages) {
708+
if (chunk->base.pages) {
709+
for (i = first_pg; i < last_pg; i++)
710+
pages[pg_idx++] = chunk->base.pages[i];
711+
} else if (chunk->base.sgt) {
712+
struct sg_page_iter piter;
713+
unsigned long cnt = 0;
714+
715+
for_each_sgtable_page(chunk->base.sgt,
716+
&piter, first_pg) {
717+
if (cnt >= last_pg - first_pg)
718+
break;
719+
pages[pg_idx++] =
720+
sg_page_iter_page(&piter);
721+
cnt++;
722+
}
723+
} else {
724+
XDNA_ERR(xdna,
725+
"Heap chunk (offset 0x%llx size 0x%lx) no backing pages",
726+
chunk_start, chunk->mem.size);
706727
mutex_unlock(&client->mm_lock);
707728
kvfree(pages);
708729
return -EINVAL;
709730
}
710-
711-
for (i = first_pg; i < last_pg; i++)
712-
pages[pg_idx++] = chunk->base.pages[i];
713731
}
714732
chunk_start = chunk_end;
715733
}
716734
mutex_unlock(&client->mm_lock);
717735

718736
if (pg_idx != nr_pages) {
737+
XDNA_ERR(xdna, "DEV BO vmap gathered %d pages, expected %lu (bo 0x%llx-0x%llx)",
738+
pg_idx, nr_pages, bo_start, bo_end);
719739
kvfree(pages);
720740
return -EINVAL;
721741
}
@@ -921,6 +941,18 @@ amdxdna_drm_expand_dev_heap(struct amdxdna_client *client,
921941
}
922942
}
923943

944+
/*
945+
* Pre-set the expected UVA so that GET_BO_INFO returns the address
946+
* where this chunk must be mmapped for contiguous heap layout.
947+
* The mmap callback will update this to the actual mmap address.
948+
*/
949+
if (amdxdna_pasid_on(client)) {
950+
u64 heap_uva = amdxdna_gem_uva(client->dev_heap);
951+
952+
if (heap_uva != AMDXDNA_INVALID_ADDR)
953+
abo->mem.uva = heap_uva + client->total_heap_size;
954+
}
955+
924956
drm_gem_object_get(to_gobj(abo));
925957
list_add_tail(&abo->heap_chunk_node, &client->dev_heap_chunks);
926958
client->total_heap_size = new_total;

0 commit comments

Comments (0)