diff --git a/drivers/accel/amdxdna/aie.c b/drivers/accel/amdxdna/aie.c
index f4e07d90b..ff635b9bb 100644
--- a/drivers/accel/amdxdna/aie.c
+++ b/drivers/accel/amdxdna/aie.c
@@ -3,7 +3,12 @@
  * Copyright (C) 2026, Advanced Micro Devices, Inc.
  */
 
+#include <drm/drm_cache.h>
+#include <linux/dma-mapping.h>
 #include
+#include <linux/gfp.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
 
 #include "aie.h"
 #include "amdxdna_mailbox_helper.h"
@@ -119,3 +124,102 @@ void amdxdna_vbnv_init(struct amdxdna_dev *xdna)
 	if (!xdna->vbnv)
 		xdna->vbnv = info->default_vbnv;
 }
+
+/**
+ * aie_dma_buf_alloc() - Allocate a DMA buffer for device/firmware access
+ * @xdna: owning device
+ * @size: requested size in bytes (clamped to at least SZ_8K, then rounded
+ *        up to a power-of-two number of pages)
+ * @dir: DMA data direction of the non-coherent mapping
+ *
+ * Return: buffer handle on success, ERR_PTR() on failure.
+ */
+struct aie_dma_hdl *aie_dma_buf_alloc(struct amdxdna_dev *xdna, u32 size,
+				      enum dma_data_direction dir)
+{
+	struct aie_dma_hdl *hdl;
+	int order;
+
+	size = max_t(u32, size, SZ_8K);
+	order = get_order(size);
+	if (order > MAX_PAGE_ORDER)
+		return ERR_PTR(-EINVAL);
+
+	hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
+	if (!hdl)
+		return ERR_PTR(-ENOMEM);
+
+	hdl->aligned_size = PAGE_SIZE << order;
+
+	if (amdxdna_iova_on(xdna)) {
+		hdl->vaddr = amdxdna_iommu_alloc(xdna, hdl->aligned_size, &hdl->dma_addr);
+		if (IS_ERR(hdl->vaddr)) {
+			int ret = PTR_ERR(hdl->vaddr);
+
+			kfree(hdl);
+			return ERR_PTR(ret);
+		}
+	} else {
+		hdl->vaddr = dma_alloc_noncoherent(xdna->ddev.dev,
+						   hdl->aligned_size,
+						   &hdl->dma_addr, dir,
+						   GFP_KERNEL);
+		if (!hdl->vaddr) {
+			kfree(hdl);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	hdl->size = size;
+	hdl->xdna = xdna;
+	hdl->dir = dir;
+
+	return hdl;
+}
+
+/**
+ * aie_dma_buf_free() - Free a buffer allocated by aie_dma_buf_alloc()
+ * @hdl: buffer handle, may be NULL (no-op)
+ */
+void aie_dma_buf_free(struct aie_dma_hdl *hdl)
+{
+	if (!hdl)
+		return;
+
+	if (amdxdna_iova_on(hdl->xdna)) {
+		amdxdna_iommu_free(hdl->xdna, hdl->aligned_size, hdl->vaddr, hdl->dma_addr);
+	} else {
+		dma_free_noncoherent(hdl->xdna->ddev.dev, hdl->aligned_size,
+				     hdl->vaddr, hdl->dma_addr, hdl->dir);
+	}
+
+	memset(hdl, 0, sizeof(*hdl));
+	kfree(hdl);
+}
+
+/**
+ * aie_dma_buf_clflush() - Flush CPU caches for a range of the buffer
+ * @hdl: buffer handle
+ * @offset: byte offset into the buffer
+ * @size: bytes to flush; 0 means "from @offset to the end of the buffer"
+ *
+ * Return: 0 on success, -EINVAL on NULL handle or out-of-range span.
+ */
+int aie_dma_buf_clflush(struct aie_dma_hdl *hdl, u32 offset, size_t size)
+{
+	if (!hdl)
+		return -EINVAL;
+
+	/* Compare against the remaining space so offset + size cannot wrap */
+	if (offset > hdl->size || size > hdl->size - offset)
+		return -EINVAL;
+
+	/*
+	 * A zero size flushes the rest of the buffer from @offset. Flushing
+	 * hdl->size bytes starting at vaddr + offset would overrun the
+	 * buffer whenever offset is non-zero.
+	 */
+	if (!size)
+		size = hdl->size - offset;
+	if (!size)
+		return 0;
+
+	drm_clflush_virt_range(hdl->vaddr + offset, size);
+	return 0;
+}
diff --git a/drivers/accel/amdxdna/aie.h b/drivers/accel/amdxdna/aie.h
index 7a68b114f..c359bd712 100644
--- a/drivers/accel/amdxdna/aie.h
+++ b/drivers/accel/amdxdna/aie.h
@@ -5,6 +5,8 @@
 #ifndef _AIE_H_
 #define _AIE_H_
 
+#include <linux/dma-mapping.h>
+
 #include "amdxdna_pci_drv.h"
 #include "amdxdna_mailbox.h"
@@ -88,6 +90,28 @@ struct amdxdna_rev_vbnv {
 	const char *vbnv;
 };
 
+/**
+ * struct aie_dma_hdl - DMA buffer handle for firmware communication
+ * @xdna: back-pointer to the device
+ * @dir: DMA data direction
+ * @vaddr: kernel virtual address
+ * @dma_addr: device DMA address
+ * @size: requested buffer size (clamped to at least SZ_8K at allocation)
+ * @aligned_size: actual allocation size (power-of-2 aligned for FW)
+ */
+struct aie_dma_hdl {
+	struct amdxdna_dev *xdna;
+	enum dma_data_direction dir;
+	void *vaddr;
+	dma_addr_t dma_addr;
+	size_t size;
+	size_t aligned_size;
+};
+
+#define to_dma_addr(hdl, off) ((hdl)->dma_addr + (off))
+#define to_cpu_addr(hdl, off) ((hdl)->vaddr + (off))
+#define to_buf_size(hdl) ((hdl)->aligned_size)
+
 /* aie.c */
 void aie_dump_mgmt_chann_debug(struct aie_device *aie);
 void aie_destroy_chann(struct aie_device *aie, struct mailbox_channel **chann);
@@ -95,6 +119,11 @@ int aie_send_mgmt_msg_wait(struct aie_device *aie, struct xdna_mailbox_msg *msg)
 int aie_check_protocol(struct aie_device *aie, u32 fw_major, u32 fw_minor);
 void amdxdna_vbnv_init(struct amdxdna_dev *xdna);
 
+struct aie_dma_hdl *aie_dma_buf_alloc(struct amdxdna_dev *xdna, u32 size,
+				      enum dma_data_direction dir);
+void aie_dma_buf_free(struct aie_dma_hdl *hdl);
+int aie_dma_buf_clflush(struct aie_dma_hdl *hdl, u32 offset, size_t size);
+
 /* aie_psp.c */
 struct psp_device *aiem_psp_create(struct drm_device *ddev, struct psp_config *conf);
 int aie_psp_start(struct psp_device *psp);
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
index 9d20e956c..f3235a235 100644
--- a/drivers/accel/amdxdna/aie2_error.c
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -11,6
+11,7 @@ #include #include +#include "aie.h" #include "aie2_msg_priv.h" #include "aie2_pci.h" #include "amdxdna_error.h" @@ -29,9 +30,7 @@ struct async_event { struct async_events { struct workqueue_struct *wq; - u8 *buf; - dma_addr_t addr; - u32 size; + struct aie_dma_hdl *dma_buf; u32 event_cnt; struct async_event event[] __counted_by(event_cnt); }; @@ -338,7 +337,7 @@ void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev) destroy_workqueue(events->wq); mutex_lock(&xdna->dev_lock); - aie2_free_msg_buffer(ndev, events->size, events->buf, events->addr); + aie_dma_buf_free(events->dma_buf); kfree(events); } @@ -354,12 +353,11 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev) if (!events) return -ENOMEM; - events->buf = aie2_alloc_msg_buffer(ndev, &total_size, &events->addr); - if (IS_ERR(events->buf)) { - ret = PTR_ERR(events->buf); + events->dma_buf = aie_dma_buf_alloc(xdna, total_size, DMA_FROM_DEVICE); + if (IS_ERR(events->dma_buf)) { + ret = PTR_ERR(events->dma_buf); goto free_events; } - events->size = total_size; events->event_cnt = total_col; events->wq = alloc_ordered_workqueue("async_wq", 0); @@ -374,8 +372,8 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev) e->ndev = ndev; e->wq = events->wq; - e->buf = &events->buf[offset]; - e->addr = events->addr + offset; + e->buf = to_cpu_addr(events->dma_buf, offset); + e->addr = to_dma_addr(events->dma_buf, offset); e->size = ASYNC_BUF_SIZE; e->resp.status = MAX_AIE2_STATUS_CODE; INIT_WORK(&e->work, aie2_error_worker); @@ -387,14 +385,14 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev) ndev->async_events = events; - XDNA_DBG(xdna, "Async event count %d, buf total size 0x%x", - events->event_cnt, events->size); + XDNA_DBG(xdna, "Async event count %d, buf total size 0x%zx", + events->event_cnt, to_buf_size(events->dma_buf)); return 0; free_wq: destroy_workqueue(events->wq); free_buf: - aie2_free_msg_buffer(ndev, events->size, events->buf, events->addr); + 
aie_dma_buf_free(events->dma_buf); free_events: kfree(events); return ret; diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c index 93706e793..7ecfb0c77 100644 --- a/drivers/accel/amdxdna/aie2_message.c +++ b/drivers/accel/amdxdna/aie2_message.c @@ -27,43 +27,6 @@ #define EXEC_MSG_OPS(xdna) ((xdna)->dev_handle->exec_msg_ops) -void *aie2_alloc_msg_buffer(struct amdxdna_dev_hdl *ndev, u32 *size, - dma_addr_t *dma_addr) -{ - struct amdxdna_dev *xdna = ndev->aie.xdna; - void *vaddr; - int order; - - *size = max(*size, SZ_8K); - order = get_order(*size); - if (order > MAX_PAGE_ORDER) - return ERR_PTR(-EINVAL); - *size = PAGE_SIZE << order; - - if (amdxdna_iova_on(xdna)) - return amdxdna_iommu_alloc(xdna, *size, dma_addr); - - vaddr = dma_alloc_noncoherent(xdna->ddev.dev, *size, dma_addr, - DMA_FROM_DEVICE, GFP_KERNEL); - if (!vaddr) - return ERR_PTR(-ENOMEM); - - return vaddr; -} - -void aie2_free_msg_buffer(struct amdxdna_dev_hdl *ndev, size_t size, - void *cpu_addr, dma_addr_t dma_addr) -{ - struct amdxdna_dev *xdna = ndev->aie.xdna; - - if (amdxdna_iova_on(xdna)) { - amdxdna_iommu_free(xdna, size, cpu_addr, dma_addr); - return; - } - - dma_free_noncoherent(xdna->ddev.dev, size, cpu_addr, dma_addr, DMA_FROM_DEVICE); -} - int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev) { DECLARE_AIE_MSG(suspend, MSG_OP_SUSPEND); @@ -369,27 +332,26 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, { DECLARE_AIE_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS); struct amdxdna_dev *xdna = ndev->aie.xdna; - u32 buf_sz = size, aie_bitmap = 0; struct amdxdna_client *client; - dma_addr_t dma_addr; - u8 *buff_addr; + struct aie_dma_hdl *dma_buf; + u32 aie_bitmap = 0; int ret; - buff_addr = aie2_alloc_msg_buffer(ndev, &buf_sz, &dma_addr); - if (IS_ERR(buff_addr)) - return PTR_ERR(buff_addr); + dma_buf = aie_dma_buf_alloc(xdna, size, DMA_FROM_DEVICE); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); /* Go through each hardware context 
and mark the AIE columns that are active */ list_for_each_entry(client, &xdna->client_list, node) amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map); *cols_filled = 0; - req.dump_buff_addr = dma_addr; - req.dump_buff_size = buf_sz; + req.dump_buff_addr = to_dma_addr(dma_buf, 0); + req.dump_buff_size = to_buf_size(dma_buf); req.num_cols = hweight32(aie_bitmap); req.aie_bitmap = aie_bitmap; - drm_clflush_virt_range(buff_addr, size); /* device can access */ + aie_dma_buf_clflush(dma_buf, 0, size); ret = aie_send_mgmt_msg_wait(&ndev->aie, &msg); if (ret) { XDNA_ERR(xdna, "Error during NPU query, status %d", ret); @@ -404,7 +366,7 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, goto fail; } - if (copy_to_user(buf, buff_addr, resp.size)) { + if (copy_to_user(buf, to_cpu_addr(dma_buf, 0), resp.size)) { ret = -EFAULT; XDNA_ERR(xdna, "Failed to copy NPU status to user space"); goto fail; @@ -413,7 +375,7 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, *cols_filled = aie_bitmap; fail: - aie2_free_msg_buffer(ndev, buf_sz, buff_addr, dma_addr); + aie_dma_buf_free(dma_buf); return ret; } @@ -423,23 +385,21 @@ int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev, { DECLARE_AIE_MSG(get_telemetry, MSG_OP_GET_TELEMETRY); struct amdxdna_dev *xdna = ndev->aie.xdna; - dma_addr_t dma_addr; - u32 buf_sz = size; - u8 *addr; + struct aie_dma_hdl *dma_buf; int ret; if (header->type >= MAX_TELEMETRY_TYPE) return -EINVAL; - addr = aie2_alloc_msg_buffer(ndev, &buf_sz, &dma_addr); - if (IS_ERR(addr)) - return PTR_ERR(addr); + dma_buf = aie_dma_buf_alloc(xdna, size, DMA_FROM_DEVICE); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); - req.buf_addr = dma_addr; - req.buf_size = buf_sz; + req.buf_addr = to_dma_addr(dma_buf, 0); + req.buf_size = to_buf_size(dma_buf); req.type = header->type; - drm_clflush_virt_range(addr, size); /* device can access */ + aie_dma_buf_clflush(dma_buf, 0, size); ret = aie_send_mgmt_msg_wait(&ndev->aie, 
&msg); if (ret) { XDNA_ERR(xdna, "Query telemetry failed, status %d", ret); @@ -452,7 +412,7 @@ int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev, goto free_buf; } - if (copy_to_user(buf, addr, resp.size)) { + if (copy_to_user(buf, to_cpu_addr(dma_buf, 0), resp.size)) { ret = -EFAULT; XDNA_ERR(xdna, "Failed to copy telemetry to user space"); goto free_buf; @@ -462,7 +422,7 @@ int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev, header->minor = resp.minor; free_buf: - aie2_free_msg_buffer(ndev, buf_sz, addr, dma_addr); + aie_dma_buf_free(dma_buf); return ret; } @@ -1161,9 +1121,7 @@ int aie2_query_app_health(struct amdxdna_dev_hdl *ndev, u32 context_id, { DECLARE_AIE_MSG(get_app_health, MSG_OP_GET_APP_HEALTH); struct amdxdna_dev *xdna = ndev->aie.xdna; - struct app_health_report *buf; - dma_addr_t dma_addr; - u32 buf_size; + struct aie_dma_hdl *dma_buf; int ret; if (!AIE_FEATURE_ON(&ndev->aie, AIE2_APP_HEALTH)) { @@ -1171,29 +1129,27 @@ int aie2_query_app_health(struct amdxdna_dev_hdl *ndev, u32 context_id, return -EOPNOTSUPP; } - buf_size = sizeof(*report); - buf = aie2_alloc_msg_buffer(ndev, &buf_size, &dma_addr); - if (IS_ERR(buf)) { + dma_buf = aie_dma_buf_alloc(xdna, sizeof(*report), DMA_FROM_DEVICE); + if (IS_ERR(dma_buf)) { XDNA_ERR(xdna, "Failed to allocate buffer for app health"); - return PTR_ERR(buf); + return PTR_ERR(dma_buf); } - req.buf_addr = dma_addr; + req.buf_addr = to_dma_addr(dma_buf, 0); req.context_id = context_id; - req.buf_size = buf_size; + req.buf_size = to_buf_size(dma_buf); - drm_clflush_virt_range(buf, sizeof(*report)); + aie_dma_buf_clflush(dma_buf, 0, sizeof(*report)); ret = aie_send_mgmt_msg_wait(&ndev->aie, &msg); if (ret) { XDNA_ERR(xdna, "Get app health failed, ret %d status 0x%x", ret, resp.status); goto free_buf; } - /* Copy the report to caller's buffer */ - memcpy(report, buf, sizeof(*report)); + memcpy(report, to_cpu_addr(dma_buf, 0), sizeof(*report)); free_buf: - aie2_free_msg_buffer(ndev, buf_size, buf, dma_addr); + 
aie_dma_buf_free(dma_buf);
 	return ret;
 }
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
index d6082d86a..dcb88adfe 100644
--- a/drivers/accel/amdxdna/aie2_pci.h
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -326,10 +326,6 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
 int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
 			 int (*notify_cb)(void *, void __iomem *, size_t));
 int aie2_update_prop_time_quota(struct amdxdna_dev_hdl *ndev, u32 us);
-void *aie2_alloc_msg_buffer(struct amdxdna_dev_hdl *ndev, u32 *size,
-			    dma_addr_t *dma_addr);
-void aie2_free_msg_buffer(struct amdxdna_dev_hdl *ndev, size_t size,
-			  void *cpu_addr, dma_addr_t dma_addr);
 
 /* aie2_hwctx.c */
 int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
diff --git a/drivers/accel/amdxdna/aie4_message.c b/drivers/accel/amdxdna/aie4_message.c
index 64995cde4..d261b6e3f 100644
--- a/drivers/accel/amdxdna/aie4_message.c
+++ b/drivers/accel/amdxdna/aie4_message.c
@@ -25,3 +25,28 @@ int aie4_suspend_fw(struct amdxdna_dev_hdl *ndev)
 
 	return ret;
 }
+
+int aie4_attach_work_buffer(struct amdxdna_dev_hdl *ndev, u32 pasid,
+			    dma_addr_t addr, u32 size)
+{
+	DECLARE_AIE_MSG(aie4_msg_attach_work_buffer, AIE4_MSG_OP_ATTACH_WORK_BUFFER);
+	struct amdxdna_dev *xdna = ndev->aie.xdna;
+	int ret;
+
+	if (size < AIE4_WORK_BUFFER_MIN_SIZE || !addr) {
+		/* %pad is the printk format for dma_addr_t; size is u32 */
+		XDNA_ERR(xdna, "Invalid work buffer addr %pad or size %u",
+			 &addr, size);
+		return -EINVAL;
+	}
+
+	req.buff_addr = addr;
+	req.buff_size = size;
+	req.pasid.raw = pasid;
+
+	ret = aie_send_mgmt_msg_wait(&ndev->aie, &msg);
+	if (ret)
+		XDNA_ERR(xdna, "Failed to attach work buffer, ret %d", ret);
+
+	return ret;
+}
diff --git a/drivers/accel/amdxdna/aie4_msg_priv.h b/drivers/accel/amdxdna/aie4_msg_priv.h
index 88463cc3a..5cb62a60b 100644
--- a/drivers/accel/amdxdna/aie4_msg_priv.h
+++ b/drivers/accel/amdxdna/aie4_msg_priv.h
@@ -10,7 +10,7 @@
 enum aie4_msg_opcode {
 	AIE4_MSG_OP_SUSPEND =
0x10003,
-
+	AIE4_MSG_OP_ATTACH_WORK_BUFFER = 0x1000D,
 	AIE4_MSG_OP_CREATE_VFS = 0x20001,
 	AIE4_MSG_OP_DESTROY_VFS = 0x20002,
 };
@@ -46,4 +46,25 @@
 struct aie4_msg_destroy_vfs_resp {
 	enum aie4_msg_status status;
 } __packed;
 
+union aie4_msg_pasid {
+	__u32 raw;
+	struct {
+		__u32 pasid : 20;
+		__u32 rsvd : 11;
+		__u32 pasid_vld : 1;
+	} f;
+} __packed;
+
+#define AIE4_WORK_BUFFER_MIN_SIZE (4 * 1024 * 1024) /* 4 MB */
+
+struct aie4_msg_attach_work_buffer_req {
+	__u64 buff_addr;
+	union aie4_msg_pasid pasid;
+	__u32 buff_size;
+} __packed;
+
+struct aie4_msg_attach_work_buffer_resp {
+	enum aie4_msg_status status;
+} __packed;
+
 #endif /* _AIE4_MSG_PRIV_H_ */
diff --git a/drivers/accel/amdxdna/aie4_pci.c b/drivers/accel/amdxdna/aie4_pci.c
index 320e249c2..c6eea3927 100644
--- a/drivers/accel/amdxdna/aie4_pci.c
+++ b/drivers/accel/amdxdna/aie4_pci.c
@@ -6,9 +6,11 @@
 #include "drm/amdxdna_accel.h"
 #include
 #include
+#include <linux/dma-mapping.h>
 #include
 #include
+#include "aie4_msg_priv.h"
 #include "aie4_pci.h"
 #include "amdxdna_pci_drv.h"
@@ -234,9 +236,42 @@ static int aie4_fw_load(struct amdxdna_dev_hdl *ndev)
 	return ret;
 }
 
+/* PF-only: allocate the DRAM work buffer handed to firmware at hw start */
+static int aie4_alloc_work_buffer(struct amdxdna_dev_hdl *ndev)
+{
+	struct amdxdna_dev *xdna = ndev->aie.xdna;
+	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+	struct aie_dma_hdl *buf;
+
+	if (pdev->is_virtfn)
+		return 0;
+
+	buf = aie_dma_buf_alloc(xdna, AIE4_WORK_BUFFER_MIN_SIZE,
+				DMA_FROM_DEVICE);
+	if (IS_ERR(buf)) {
+		XDNA_ERR(xdna, "Failed to alloc work buffer, size 0x%x",
+			 AIE4_WORK_BUFFER_MIN_SIZE);
+		return PTR_ERR(buf);
+	}
+
+	ndev->work_buf = buf;
+	/* to_buf_size() yields a size_t, so %zx rather than %lx */
+	XDNA_DBG(xdna, "Work buffer allocated: size 0x%zx",
+		 to_buf_size(buf));
+
+	return 0;
+}
+
+static void aie4_free_work_buffer(struct amdxdna_dev_hdl *ndev)
+{
+	aie_dma_buf_free(ndev->work_buf);
+	ndev->work_buf = NULL;
+}
+
 static int aie4_hw_start(struct amdxdna_dev *xdna)
 {
 	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+	dma_addr_t dma_addr;
 	int ret;
 
 	ret = aie4_fw_load(ndev);
@@ -247,8 +282,18
@@ static int aie4_hw_start(struct amdxdna_dev *xdna) if (ret) goto fw_unload; + if (ndev->work_buf) { + dma_addr = to_dma_addr(ndev->work_buf, 0); + ret = aie4_attach_work_buffer(ndev, 0, dma_addr, + to_buf_size(ndev->work_buf)); + if (ret) + goto mbox_fini; + } + return 0; +mbox_fini: + aie4_mailbox_fini(ndev); fw_unload: aie4_fw_unload(ndev); @@ -260,6 +303,7 @@ static void aie4_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev) int ret; /* No paired resume needed, fw is stateless */ + /* Firmware releases the DRAM work buffer internally during suspend */ ret = aie4_suspend_fw(ndev); if (ret) XDNA_ERR(ndev->aie.xdna, "suspend_fw failed, ret %d", ret); @@ -459,6 +503,7 @@ static void aie4_fini(struct amdxdna_dev *xdna) aie4_sriov_stop(ndev); aie4_pcidev_fini(ndev); + aie4_free_work_buffer(ndev); } static int aie4_init(struct amdxdna_dev *xdna) @@ -474,15 +519,23 @@ static int aie4_init(struct amdxdna_dev *xdna) ndev->aie.xdna = xdna; xdna->dev_handle = ndev; + ret = aie4_alloc_work_buffer(ndev); + if (ret) + return ret; + ret = aie4_pcidev_init(ndev); if (ret) { XDNA_ERR(xdna, "Setup PCI device failed, ret %d", ret); - return ret; + goto free_work_buf; } amdxdna_vbnv_init(xdna); XDNA_DBG(xdna, "aie4 init finished"); return 0; + +free_work_buf: + aie4_free_work_buffer(ndev); + return ret; } const struct amdxdna_dev_ops aie4_ops = { diff --git a/drivers/accel/amdxdna/aie4_pci.h b/drivers/accel/amdxdna/aie4_pci.h index aa1495c33..cce42255e 100644 --- a/drivers/accel/amdxdna/aie4_pci.h +++ b/drivers/accel/amdxdna/aie4_pci.h @@ -31,10 +31,13 @@ struct amdxdna_dev_hdl { void __iomem *rbuf_base; struct mailbox *mbox; + struct aie_dma_hdl *work_buf; }; /* aie4_message.c */ int aie4_suspend_fw(struct amdxdna_dev_hdl *ndev); +int aie4_attach_work_buffer(struct amdxdna_dev_hdl *ndev, u32 pasid, + dma_addr_t addr, u32 size); /* aie4_sriov.c */ #if IS_ENABLED(CONFIG_PCI_IOV) diff --git a/drivers/accel/amdxdna/aie4_sriov.c b/drivers/accel/amdxdna/aie4_sriov.c index 
b169c2374..71e069ca4 100644 --- a/drivers/accel/amdxdna/aie4_sriov.c +++ b/drivers/accel/amdxdna/aie4_sriov.c @@ -45,6 +45,9 @@ int aie4_sriov_stop(struct amdxdna_dev_hdl *ndev) struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev); int ret; + if (!pci_num_vf(pdev)) + return 0; + ret = pci_vfs_assigned(pdev); if (ret) { XDNA_ERR(xdna, "VFs are still assigned to VMs");