-
Notifications
You must be signed in to change notification settings - Fork 119
Expand file tree
/
Copy pathaie.c
More file actions
197 lines (156 loc) · 4.59 KB
/
aie.c
File metadata and controls
197 lines (156 loc) · 4.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2026, Advanced Micro Devices, Inc.
*/
#include <drm/drm_cache.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include "aie.h"
#include "amdxdna_mailbox_helper.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
/*
 * Dump the management mailbox channel configuration for debugging.
 *
 * Logs the ring-buffer registers and geometry for both directions of the
 * management channel (i2x: device-to-host, x2i: host-to-device — direction
 * naming inferred from symmetry; confirm against mailbox docs), plus the
 * channel index and the negotiated mailbox protocol version.
 */
void aie_dump_mgmt_chann_debug(struct aie_device *aie)
{
struct amdxdna_dev *xdna = aie->xdna;
XDNA_DBG(xdna, "i2x tail 0x%x", aie->mgmt_i2x.mb_tail_ptr_reg);
XDNA_DBG(xdna, "i2x head 0x%x", aie->mgmt_i2x.mb_head_ptr_reg);
XDNA_DBG(xdna, "i2x ringbuf 0x%x", aie->mgmt_i2x.rb_start_addr);
XDNA_DBG(xdna, "i2x rsize 0x%x", aie->mgmt_i2x.rb_size);
XDNA_DBG(xdna, "x2i tail 0x%x", aie->mgmt_x2i.mb_tail_ptr_reg);
XDNA_DBG(xdna, "x2i head 0x%x", aie->mgmt_x2i.mb_head_ptr_reg);
XDNA_DBG(xdna, "x2i ringbuf 0x%x", aie->mgmt_x2i.rb_start_addr);
XDNA_DBG(xdna, "x2i rsize 0x%x", aie->mgmt_x2i.rb_size);
XDNA_DBG(xdna, "x2i chann index 0x%x", aie->mgmt_chan_idx);
XDNA_DBG(xdna, "mailbox protocol major 0x%x", aie->mgmt_prot_major);
XDNA_DBG(xdna, "mailbox protocol minor 0x%x", aie->mgmt_prot_minor);
}
/*
 * Stop and free a mailbox channel, then clear the caller's pointer.
 *
 * Must be called with xdna->dev_lock held (warns otherwise). Safe to call
 * when *chann is already NULL; NULL-ing *chann afterwards makes repeated
 * calls harmless.
 */
void aie_destroy_chann(struct aie_device *aie, struct mailbox_channel **chann)
{
	struct amdxdna_dev *xdna = aie->xdna;
	struct mailbox_channel *ch = *chann;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	if (!ch)
		return;

	xdna_mailbox_stop_channel(ch);
	xdna_mailbox_free_channel(ch);
	*chann = NULL;
}
/*
 * Send a message on the management channel and wait for completion.
 *
 * Must be called with xdna->dev_lock held (warns otherwise).
 *
 * Return: 0 on success; -ENODEV if the management channel is gone;
 * -EINVAL if the firmware reported a non-zero status; otherwise the
 * error from xdna_send_msg_wait(). On timeout (-ETIME) the channel is
 * torn down, since the firmware is presumed unresponsive.
 */
int aie_send_mgmt_msg_wait(struct aie_device *aie, struct xdna_mailbox_msg *msg)
{
	struct amdxdna_dev *xdna = aie->xdna;
	struct xdna_notify *hdl = msg->handle;
	int ret;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
	if (!aie->mgmt_chann)
		return -ENODEV;

	ret = xdna_send_msg_wait(xdna, aie->mgmt_chann, msg);
	if (ret == -ETIME)
		aie_destroy_chann(aie, &aie->mgmt_chann);

	/*
	 * Report the same status word the condition tested; the original
	 * printed *hdl->data while checking *hdl->status, so the logged
	 * "status" could disagree with the failure actually detected.
	 */
	if (!ret && *hdl->status) {
		XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
			 msg->opcode, *hdl->status);
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Validate the firmware mailbox protocol version against the driver's
 * support table and accumulate the feature bits of every matching entry.
 *
 * The table is terminated by an entry with major == 0. An entry matches
 * when its major equals fw_major and fw_minor lies within
 * [min_minor, max_minor] (max_minor == 0 means no upper bound). All
 * matching entries contribute to aie->feature_mask.
 *
 * Return: 0 if at least one entry matched, -EOPNOTSUPP otherwise.
 */
int aie_check_protocol(struct aie_device *aie, u32 fw_major, u32 fw_minor)
{
	const struct amdxdna_fw_feature_tbl *entry;
	bool matched = false;

	for (entry = aie->xdna->dev_info->fw_feature_tbl;
	     entry->major; entry++) {
		bool minor_ok;

		if (entry->major != fw_major)
			continue;

		minor_ok = fw_minor >= entry->min_minor &&
			   (!entry->max_minor || fw_minor <= entry->max_minor);
		if (!minor_ok)
			continue;

		/* Firmware version matches this driver support entry. */
		aie->feature_mask |= entry->features;
		matched = true;
	}

	return matched ? 0 : -EOPNOTSUPP;
}
/*
 * Look up the VBNV string for a device revision.
 *
 * @tbl: NULL-vbnv-terminated table of (revision, vbnv) pairs; may be NULL.
 * @rev: device revision to match.
 *
 * Return: the matching VBNV string, or NULL when @tbl is NULL or no
 * entry matches.
 */
static const char *amdxdna_lookup_vbnv(const struct amdxdna_rev_vbnv *tbl, u32 rev)
{
	const struct amdxdna_rev_vbnv *entry;

	if (!tbl)
		return NULL;

	for (entry = tbl; entry->vbnv; entry++) {
		if (entry->revision == rev)
			return entry->vbnv;
	}

	return NULL;
}
void amdxdna_vbnv_init(struct amdxdna_dev *xdna)
{
const struct amdxdna_dev_info *info = xdna->dev_info;
u32 rev;
xdna->vbnv = info->default_vbnv;
if (!info->ops->get_dev_revision)
return;
if (info->ops->get_dev_revision(xdna, &rev))
return;
xdna->vbnv = amdxdna_lookup_vbnv(info->rev_vbnv_tbl, rev);
if (!xdna->vbnv)
xdna->vbnv = info->default_vbnv;
}
/*
 * Allocate a DMA buffer handle for device communication.
 *
 * @xdna: owning device.
 * @size: requested size in bytes; rounded up to at least SZ_8K, then to
 *        a power-of-two page order. hdl->size keeps the (clamped)
 *        request, hdl->aligned_size the actual allocation.
 * @dir:  DMA direction, used for the non-coherent allocation and
 *        remembered for the matching free.
 *
 * Uses the IOMMU allocator when IOVA mode is on, otherwise a
 * non-coherent DMA allocation. The caller owns the handle and must
 * release it with aie_dma_buf_free().
 *
 * Return: the handle, or ERR_PTR(-EINVAL) for an oversized request,
 * ERR_PTR(-ENOMEM)/allocator error on failure.
 */
struct aie_dma_hdl *aie_dma_buf_alloc(struct amdxdna_dev *xdna, u32 size,
				      enum dma_data_direction dir)
{
	struct aie_dma_hdl *hdl;
	int order;
	int ret;

	size = max_t(u32, size, SZ_8K);
	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-EINVAL);

	hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
	if (!hdl)
		return ERR_PTR(-ENOMEM);

	hdl->aligned_size = PAGE_SIZE << order;

	if (amdxdna_iova_on(xdna)) {
		hdl->vaddr = amdxdna_iommu_alloc(xdna, hdl->aligned_size,
						 &hdl->dma_addr);
		if (IS_ERR(hdl->vaddr)) {
			ret = PTR_ERR(hdl->vaddr);
			goto free_hdl;
		}
	} else {
		hdl->vaddr = dma_alloc_noncoherent(xdna->ddev.dev,
						   hdl->aligned_size,
						   &hdl->dma_addr, dir,
						   GFP_KERNEL);
		if (!hdl->vaddr) {
			ret = -ENOMEM;
			goto free_hdl;
		}
	}

	hdl->size = size;
	hdl->xdna = xdna;
	hdl->dir = dir;
	return hdl;

free_hdl:
	kfree(hdl);
	return ERR_PTR(ret);
}
void aie_dma_buf_free(struct aie_dma_hdl *hdl)
{
if (!hdl)
return;
if (amdxdna_iova_on(hdl->xdna)) {
amdxdna_iommu_free(hdl->xdna, hdl->aligned_size, hdl->vaddr, hdl->dma_addr);
} else {
dma_free_noncoherent(hdl->xdna->ddev.dev, hdl->aligned_size,
hdl->vaddr, hdl->dma_addr, hdl->dir);
}
memset(hdl, 0, sizeof(*hdl));
kfree(hdl);
}
/*
 * Flush CPU cache lines covering [offset, offset + size) of the buffer.
 *
 * @hdl:    DMA buffer handle; NULL returns -EINVAL.
 * @offset: byte offset into the buffer.
 * @size:   byte count; 0 means "flush from @offset to the end of the
 *          buffer".
 *
 * Fixes two defects in the original: (a) with size == 0 the bounds
 * check passed for any offset, but the flush then covered hdl->size
 * bytes starting at vaddr + offset, overrunning the buffer whenever
 * offset > 0; (b) offset + size was compared unchecked, so a huge size
 * could wrap the sum past hdl->size. The rewritten checks avoid the
 * addition entirely.
 *
 * Return: 0 on success, -EINVAL on NULL handle or out-of-range span.
 */
int aie_dma_buf_clflush(struct aie_dma_hdl *hdl, u32 offset, size_t size)
{
	if (!hdl)
		return -EINVAL;

	if (offset > hdl->size)
		return -EINVAL;

	if (!size)
		size = hdl->size - offset;
	else if (size > hdl->size - offset)
		return -EINVAL;

	drm_clflush_virt_range(hdl->vaddr + offset, size);
	return 0;
}