Diffstat (limited to 'drivers/accel/qaic')
-rw-r--r--  drivers/accel/qaic/Makefile            5
-rw-r--r--  drivers/accel/qaic/mhi_controller.c   38
-rw-r--r--  drivers/accel/qaic/qaic.h             12
-rw-r--r--  drivers/accel/qaic/qaic_control.c      2
-rw-r--r--  drivers/accel/qaic/qaic_data.c        83
-rw-r--r--  drivers/accel/qaic/qaic_debugfs.c    305
-rw-r--r--  drivers/accel/qaic/qaic_debugfs.h     20
-rw-r--r--  drivers/accel/qaic/qaic_drv.c        177
-rw-r--r--  drivers/accel/qaic/sahara.c          822
-rw-r--r--  drivers/accel/qaic/sahara.h           10
10 files changed, 1371 insertions(+), 103 deletions(-)
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 3f7f6dfde7f2..35e883515629 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -10,4 +10,7 @@ qaic-y := \
qaic_control.o \
qaic_data.o \
qaic_drv.o \
- qaic_timesync.o
+ qaic_timesync.o \
+ sahara.o
+
+qaic-$(CONFIG_DEBUG_FS) += qaic_debugfs.o
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index cb77d048ed54..8ab82e78dd94 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -20,7 +20,7 @@ static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
module_param(mhi_timeout_ms, uint, 0600);
MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value");
-static struct mhi_channel_config aic100_channels[] = {
+static const struct mhi_channel_config aic100_channels[] = {
{
.name = "QAIC_LOOPBACK",
.num = 0,
@@ -358,8 +358,8 @@ static struct mhi_channel_config aic100_channels[] = {
.wake_capable = false,
},
{
- .num = 21,
.name = "QAIC_TIMESYNC",
+ .num = 21,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
@@ -390,8 +390,8 @@ static struct mhi_channel_config aic100_channels[] = {
.wake_capable = false,
},
{
- .num = 23,
.name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 23,
.num_elements = 32,
.local_elements = 0,
.event_ring = 0,
@@ -405,6 +405,38 @@ static struct mhi_channel_config aic100_channels[] = {
.auto_queue = false,
.wake_capable = false,
},
+ {
+ .name = "IPCR",
+ .num = 24,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 25,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .wake_capable = false,
+ },
};
static struct mhi_event_config aic100_events[] = {
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 582836f9538f..02561b6cecc6 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -30,6 +30,7 @@
#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
+#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
enum __packed dev_states {
/* Device is offline or will be very soon */
@@ -152,6 +153,14 @@ struct qaic_device {
struct mhi_device *qts_ch;
/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
struct workqueue_struct *qts_wq;
+ /* Head of list of pages allocated by MHI bootlog device */
+ struct list_head bootlog;
+ /* MHI bootlog channel device */
+ struct mhi_device *bootlog_ch;
+ /* Work queue for tasks related to MHI bootlog device */
+ struct workqueue_struct *bootlog_wq;
+ /* Synchronizes access of pages in MHI bootlog device */
+ struct mutex bootlog_mutex;
};
struct qaic_drm_device {
@@ -191,8 +200,6 @@ struct qaic_bo {
u32 nr_slice;
/* Number of slice that have been transferred by DMA engine */
u32 nr_slice_xfer_done;
- /* true = BO is queued for execution, true = BO is not queued */
- bool queued;
/*
* If true then user has attached slicing information to this BO by
* calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
@@ -281,6 +288,7 @@ int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
+void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail);
void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index 9e8a8cbadf6b..d8bdab69f800 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -496,7 +496,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
nents = sgt->nents;
nents_dma = nents;
*size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
- for_each_sgtable_sg(sgt, sg, i) {
+ for_each_sgtable_dma_sg(sgt, sg, i) {
*size -= sizeof(*asp);
/* Save 1K for possible follow-up transactions. */
if (*size < SZ_1K) {
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 03c9a793da35..43aba57b48f0 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -141,6 +141,11 @@ struct dbc_rsp {
__le16 status;
} __packed;
+static inline bool bo_queued(struct qaic_bo *bo)
+{
+ return !list_empty(&bo->xfer_list);
+}
+
inline int get_dbc_req_elem_size(void)
{
return sizeof(struct dbc_req);
@@ -167,9 +172,10 @@ static void free_slice(struct kref *kref)
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
- int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
+ unsigned int len, nents, offf, offl;
struct sg_table *sgt;
+ size_t total_len;
int ret, j;
/* find out number of relevant nents needed for this mem */
@@ -177,9 +183,11 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
sgf = NULL;
sgl = NULL;
nents = 0;
+ offf = 0;
+ offl = 0;
size = size ? size : PAGE_SIZE;
- for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
+ for_each_sgtable_dma_sg(sgt_in, sg, j) {
len = sg_dma_len(sg);
if (!len)
@@ -216,7 +224,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
/* copy relevant sg node and fix page and length */
sgn = sgf;
- for_each_sgtable_sg(sgt, sg, j) {
+ for_each_sgtable_dma_sg(sgt, sg, j) {
memcpy(sg, sgn, sizeof(*sg));
if (sgn == sgf) {
sg_dma_address(sg) += offf;
@@ -296,7 +304,7 @@ static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
* fence.
*/
dev_addr = req->dev_addr;
- for_each_sgtable_sg(slice->sgt, sg, i) {
+ for_each_sgtable_dma_sg(slice->sgt, sg, i) {
slice->reqs[i].cmd = cmd;
slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
sg_dma_address(sg) : dev_addr);
@@ -549,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
+ u64 total;
int i;
for (i = 0; i < count; i++) {
@@ -558,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;
- if (slice_ent[i].offset + slice_ent[i].size > total_size)
+ if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
+ total > total_size)
return -EINVAL;
}
@@ -569,6 +579,9 @@ static void qaic_free_sgt(struct sg_table *sgt)
{
struct scatterlist *sg;
+ if (!sgt)
+ return;
+
for (sg = sgt->sgl; sg; sg = sg_next(sg))
if (sg_page(sg))
__free_pages(sg_page(sg), get_order(sg->length));
@@ -648,6 +661,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
}
complete_all(&bo->xfer_done);
INIT_LIST_HEAD(&bo->slices);
+ INIT_LIST_HEAD(&bo->xfer_list);
}
static struct qaic_bo *qaic_alloc_init_bo(void)
@@ -709,9 +723,13 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto free_bo;
+
ret = drm_gem_handle_create(file_priv, obj, &args->handle);
if (ret)
- goto free_sgt;
+ goto free_bo;
bo->handle = args->handle;
drm_gem_object_put(obj);
@@ -720,10 +738,8 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
return 0;
-free_sgt:
- qaic_free_sgt(bo->sgt);
free_bo:
- kfree(bo);
+ drm_gem_object_put(obj);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
@@ -738,7 +754,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
struct drm_gem_object *obj;
struct qaic_device *qdev;
struct qaic_user *usr;
- int ret;
+ int ret = 0;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -760,9 +776,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
goto unlock_dev_srcu;
}
- ret = drm_gem_create_mmap_offset(obj);
- if (ret == 0)
- args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+ args->offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put(obj);
@@ -828,9 +842,6 @@ static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_h
struct sg_table *sgt;
int ret;
- if (obj->import_attach->dmabuf->size < hdr->size)
- return -EINVAL;
-
sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
@@ -847,9 +858,6 @@ static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->base.size < hdr->size)
- return -EINVAL;
-
ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
if (ret)
return -EFAULT;
@@ -950,9 +958,6 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
if (arg_size / args->hdr.count != sizeof(*slice_ent))
return -EINVAL;
- if (args->hdr.size == 0)
- return -EINVAL;
-
if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
return -EINVAL;
@@ -992,16 +997,16 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
goto free_slice_ent;
}
- ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size);
- if (ret)
- goto free_slice_ent;
-
obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
if (!obj) {
ret = -ENOENT;
goto free_slice_ent;
}
+ ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, obj->size);
+ if (ret)
+ goto put_bo;
+
bo = to_qaic_bo(obj);
ret = mutex_lock_interruptible(&bo->lock);
if (ret)
@@ -1173,7 +1178,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
struct bo_slice *slice;
unsigned long flags;
struct qaic_bo *bo;
- bool queued;
int i, j;
int ret;
@@ -1205,9 +1209,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
}
spin_lock_irqsave(&dbc->xfer_lock, flags);
- queued = bo->queued;
- bo->queued = true;
- if (queued) {
+ if (bo_queued(bo)) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EINVAL;
goto unlock_bo;
@@ -1230,7 +1232,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
else
ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
if (ret) {
- bo->queued = false;
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
goto unlock_bo;
}
@@ -1253,8 +1254,7 @@ failed_to_send_bo:
spin_lock_irqsave(&dbc->xfer_lock, flags);
bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
obj = &bo->base;
- bo->queued = false;
- list_del(&bo->xfer_list);
+ list_del_init(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
drm_gem_object_put(obj);
@@ -1615,8 +1615,7 @@ read_fifo:
*/
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
bo->nr_slice_xfer_done = 0;
- bo->queued = false;
- list_del(&bo->xfer_list);
+ list_del_init(&bo->xfer_list);
bo->perf_stats.req_processed_ts = ktime_get_ns();
complete_all(&bo->xfer_done);
drm_gem_object_put(&bo->base);
@@ -1875,7 +1874,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
/* Check if BO is committed to H/W for DMA */
spin_lock_irqsave(&dbc->xfer_lock, flags);
- if (bo->queued) {
+ if (bo_queued(bo)) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EBUSY;
goto unlock_ch_srcu;
@@ -1905,8 +1904,7 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
spin_lock_irqsave(&dbc->xfer_lock, flags);
while (!list_empty(&dbc->xfer_list)) {
bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
- bo->queued = false;
- list_del(&bo->xfer_list);
+ list_del_init(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
bo->nr_slice_xfer_done = 0;
bo->req_id = 0;
@@ -1988,3 +1986,12 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id)
dbc->in_use = false;
wake_up(&dbc->dbc_release);
}
+
+void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail)
+{
+ if (!dbc || !head || !tail)
+ return;
+
+ *head = readl(dbc->dbc_base + REQHP_OFF);
+ *tail = readl(dbc->dbc_base + REQTP_OFF);
+}
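
A minimal, standalone sketch of the list idiom behind bo_queued() and list_del_init() in the hunks above, assuming only <linux/list.h> (illustrative kernel-style C, not part of this patch):

/*
 * After INIT_LIST_HEAD() or list_del_init() a node points at itself, so
 * list_empty() on the node reports whether it is currently linked into
 * any list. bo_queued() uses exactly this property of bo->xfer_list in
 * place of the removed "queued" boolean.
 */
#include <linux/list.h>

struct item {
	struct list_head node;
};

static LIST_HEAD(pending);

static void item_lifecycle(struct item *it)
{
	INIT_LIST_HEAD(&it->node);		/* not queued: list_empty() is true  */

	list_add_tail(&it->node, &pending);	/* queued:     list_empty() is false */

	list_del_init(&it->node);		/* dequeued:   list_empty() is true  */
}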
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
new file mode 100644
index 000000000000..ba0cf2f94732
--- /dev/null
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "qaic.h"
+#include "qaic_debugfs.h"
+
+#define BOOTLOG_POOL_SIZE 16
+#define BOOTLOG_MSG_SIZE 512
+#define QAIC_DBC_DIR_NAME 9
+
+struct bootlog_msg {
+ /* Buffer for bootlog messages */
+ char str[BOOTLOG_MSG_SIZE];
+ /* Root struct of device, used to access device resources */
+ struct qaic_device *qdev;
+ /* Work struct to schedule work coming on QAIC_LOGGING channel */
+ struct work_struct work;
+};
+
+struct bootlog_page {
+ /* Node in list of bootlog pages maintained by root device struct */
+ struct list_head node;
+ /* Total size of the buffer that holds the bootlogs. It is PAGE_SIZE */
+ unsigned int size;
+ /* Offset for the next bootlog */
+ unsigned int offset;
+};
+
+static int bootlog_show(struct seq_file *s, void *unused)
+{
+ struct bootlog_page *page;
+ struct qaic_device *qdev;
+ void *page_end;
+ void *log;
+
+ qdev = s->private;
+ mutex_lock(&qdev->bootlog_mutex);
+ list_for_each_entry(page, &qdev->bootlog, node) {
+ log = page + 1;
+ page_end = (void *)page + page->offset;
+ while (log < page_end) {
+ seq_printf(s, "%s", (char *)log);
+ log += strlen(log) + 1;
+ }
+ }
+ mutex_unlock(&qdev->bootlog_mutex);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(bootlog);
+
+static int fifo_size_show(struct seq_file *s, void *unused)
+{
+ struct dma_bridge_chan *dbc = s->private;
+
+ seq_printf(s, "%u\n", dbc->nelem);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(fifo_size);
+
+static int queued_show(struct seq_file *s, void *unused)
+{
+ struct dma_bridge_chan *dbc = s->private;
+ u32 tail = 0, head = 0;
+
+ qaic_data_get_fifo_info(dbc, &head, &tail);
+
+ if (head == U32_MAX || tail == U32_MAX)
+ seq_printf(s, "%u\n", 0);
+ else if (head > tail)
+ seq_printf(s, "%u\n", dbc->nelem - head + tail);
+ else
+ seq_printf(s, "%u\n", tail - head);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(queued);
+
+void qaic_debugfs_init(struct qaic_drm_device *qddev)
+{
+ struct qaic_device *qdev = qddev->qdev;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_dir;
+ char name[QAIC_DBC_DIR_NAME];
+ u32 i;
+
+ debugfs_root = to_drm(qddev)->debugfs_root;
+
+ debugfs_create_file("bootlog", 0400, debugfs_root, qdev, &bootlog_fops);
+ /*
+ * 256 dbcs per device is likely the max we will ever see and lets static checking see a
+ * reasonable range.
+ */
+ for (i = 0; i < qdev->num_dbc && i < 256; ++i) {
+ snprintf(name, QAIC_DBC_DIR_NAME, "dbc%03u", i);
+ debugfs_dir = debugfs_create_dir(name, debugfs_root);
+ debugfs_create_file("fifo_size", 0400, debugfs_dir, &qdev->dbc[i], &fifo_size_fops);
+ debugfs_create_file("queued", 0400, debugfs_dir, &qdev->dbc[i], &queued_fops);
+ }
+}
+
+static struct bootlog_page *alloc_bootlog_page(struct qaic_device *qdev)
+{
+ struct bootlog_page *page;
+
+ page = (struct bootlog_page *)devm_get_free_pages(&qdev->pdev->dev, GFP_KERNEL, 0);
+ if (!page)
+ return page;
+
+ page->size = PAGE_SIZE;
+ page->offset = sizeof(*page);
+ list_add_tail(&page->node, &qdev->bootlog);
+
+ return page;
+}
+
+static int reset_bootlog(struct qaic_device *qdev)
+{
+ struct bootlog_page *page;
+ struct bootlog_page *i;
+
+ mutex_lock(&qdev->bootlog_mutex);
+ list_for_each_entry_safe(page, i, &qdev->bootlog, node) {
+ list_del(&page->node);
+ devm_free_pages(&qdev->pdev->dev, (unsigned long)page);
+ }
+
+ page = alloc_bootlog_page(qdev);
+ mutex_unlock(&qdev->bootlog_mutex);
+ if (!page)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void *bootlog_get_space(struct qaic_device *qdev, unsigned int size)
+{
+ struct bootlog_page *page;
+
+ page = list_last_entry(&qdev->bootlog, struct bootlog_page, node);
+
+ if (size_add(size, sizeof(*page)) > page->size)
+ return NULL;
+
+ if (page->offset + size > page->size) {
+ page = alloc_bootlog_page(qdev);
+ if (!page)
+ return NULL;
+ }
+
+ return (void *)page + page->offset;
+}
+
+static void bootlog_commit(struct qaic_device *qdev, unsigned int size)
+{
+ struct bootlog_page *page;
+
+ page = list_last_entry(&qdev->bootlog, struct bootlog_page, node);
+
+ page->offset += size;
+}
+
+static void bootlog_log(struct work_struct *work)
+{
+ struct bootlog_msg *msg = container_of(work, struct bootlog_msg, work);
+ unsigned int len = strlen(msg->str) + 1;
+ struct qaic_device *qdev = msg->qdev;
+ void *log;
+
+ mutex_lock(&qdev->bootlog_mutex);
+ log = bootlog_get_space(qdev, len);
+ if (log) {
+ memcpy(log, msg, len);
+ bootlog_commit(qdev, len);
+ }
+ mutex_unlock(&qdev->bootlog_mutex);
+
+ if (mhi_queue_buf(qdev->bootlog_ch, DMA_FROM_DEVICE, msg, BOOTLOG_MSG_SIZE, MHI_EOT))
+ devm_kfree(&qdev->pdev->dev, msg);
+}
+
+static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct bootlog_msg *msg;
+ int i, ret;
+
+ qdev->bootlog_wq = alloc_ordered_workqueue("qaic_bootlog", 0);
+ if (!qdev->bootlog_wq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = reset_bootlog(qdev);
+ if (ret)
+ goto destroy_workqueue;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ goto destroy_workqueue;
+
+ for (i = 0; i < BOOTLOG_POOL_SIZE; i++) {
+ msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto mhi_unprepare;
+ }
+
+ msg->qdev = qdev;
+ INIT_WORK(&msg->work, bootlog_log);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, msg, BOOTLOG_MSG_SIZE, MHI_EOT);
+ if (ret)
+ goto mhi_unprepare;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->bootlog_ch = mhi_dev;
+ return 0;
+
+mhi_unprepare:
+ mhi_unprepare_from_transfer(mhi_dev);
+destroy_workqueue:
+ flush_workqueue(qdev->bootlog_wq);
+ destroy_workqueue(qdev->bootlog_wq);
+out:
+ return ret;
+}
+
+static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+
+ mhi_unprepare_from_transfer(qdev->bootlog_ch);
+ flush_workqueue(qdev->bootlog_wq);
+ destroy_workqueue(qdev->bootlog_wq);
+ qdev->bootlog_ch = NULL;
+}
+
+static void qaic_bootlog_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+}
+
+static void qaic_bootlog_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct bootlog_msg *msg = mhi_result->buf_addr;
+
+ if (mhi_result->transaction_status) {
+ devm_kfree(&qdev->pdev->dev, msg);
+ return;
+ }
+
+ /* Force a null at the end of the transferred string */
+ msg->str[mhi_result->bytes_xferd - 1] = 0;
+
+ queue_work(qdev->bootlog_wq, &msg->work);
+}
+
+static const struct mhi_device_id qaic_bootlog_mhi_match_table[] = {
+ { .chan = "QAIC_LOGGING", },
+ {},
+};
+
+static struct mhi_driver qaic_bootlog_mhi_driver = {
+ .id_table = qaic_bootlog_mhi_match_table,
+ .remove = qaic_bootlog_mhi_remove,
+ .probe = qaic_bootlog_mhi_probe,
+ .ul_xfer_cb = qaic_bootlog_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_bootlog_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_bootlog",
+ },
+};
+
+int qaic_bootlog_register(void)
+{
+ return mhi_driver_register(&qaic_bootlog_mhi_driver);
+}
+
+void qaic_bootlog_unregister(void)
+{
+ mhi_driver_unregister(&qaic_bootlog_mhi_driver);
+}
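
The debugfs nodes created above are plain read-only text files: one "bootlog" per device plus "fifo_size" and "queued" under a dbcNNN directory per DMA bridge channel. A minimal userspace sketch of reading them follows; the /sys/kernel/debug/accel/0/ paths are an assumption about where the accel core places the per-device debugfs root and will vary by system:

#include <stdio.h>

/* All paths below are assumptions; adjust to the actual accel minor. */
static void dump_file(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump_file("/sys/kernel/debug/accel/0/bootlog");		/* device boot log */
	dump_file("/sys/kernel/debug/accel/0/dbc000/fifo_size");	/* ring size */
	dump_file("/sys/kernel/debug/accel/0/dbc000/queued");		/* queued elements */
	return 0;
}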
diff --git a/drivers/accel/qaic/qaic_debugfs.h b/drivers/accel/qaic/qaic_debugfs.h
new file mode 100644
index 000000000000..05e74f84cf9f
--- /dev/null
+++ b/drivers/accel/qaic/qaic_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#ifndef __QAIC_DEBUGFS_H__
+#define __QAIC_DEBUGFS_H__
+
+#include <drm/drm_file.h>
+
+#ifdef CONFIG_DEBUG_FS
+int qaic_bootlog_register(void);
+void qaic_bootlog_unregister(void);
+void qaic_debugfs_init(struct qaic_drm_device *qddev);
+#else
+static inline int qaic_bootlog_register(void) { return 0; }
+static inline void qaic_bootlog_unregister(void) {}
+static inline void qaic_debugfs_init(struct qaic_drm_device *qddev) {}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* __QAIC_DEBUGFS_H__ */
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 2a313eb69b12..81819b9ef8d4 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -28,10 +28,13 @@
#include "mhi_controller.h"
#include "qaic.h"
+#include "qaic_debugfs.h"
#include "qaic_timesync.h"
+#include "sahara.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
+#define PCI_DEV_AIC080 0xa080
#define PCI_DEV_AIC100 0xa100
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
@@ -44,6 +47,53 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);
+static void qaicm_wq_release(struct drm_device *dev, void *res)
+{
+ struct workqueue_struct *wq = res;
+
+ destroy_workqueue(wq);
+}
+
+static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *name)
+{
+ struct workqueue_struct *wq;
+ int ret;
+
+ wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name);
+ if (!wq)
+ return ERR_PTR(-ENOMEM);
+ ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return wq;
+}
+
+static void qaicm_srcu_release(struct drm_device *dev, void *res)
+{
+ struct srcu_struct *lock = res;
+
+ cleanup_srcu_struct(lock);
+}
+
+static int qaicm_srcu_init(struct drm_device *dev, struct srcu_struct *lock)
+{
+ int ret;
+
+ ret = init_srcu_struct(lock);
+ if (ret)
+ return ret;
+
+ return drmm_add_action_or_reset(dev, qaicm_srcu_release, lock);
+}
+
+static void qaicm_pci_release(struct drm_device *dev, void *res)
+{
+ struct qaic_device *qdev = to_qaic_device(dev);
+
+ pci_set_drvdata(qdev->pdev, NULL);
+}
+
static void free_usr(struct kref *kref)
{
struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
@@ -158,7 +208,6 @@ static const struct drm_driver qaic_accel_driver = {
.name = QAIC_NAME,
.desc = QAIC_DESC,
- .date = "20190618",
.fops = &qaic_accel_fops,
.open = qaic_open,
@@ -182,8 +231,12 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
qddev->partition_id = partition_id;
ret = drm_dev_register(drm, 0);
- if (ret)
+ if (ret) {
pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
+ return ret;
+ }
+
+ qaic_debugfs_init(qddev);
return ret;
}
@@ -299,74 +352,77 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
release_dbc(qdev, i);
}
-static void cleanup_qdev(struct qaic_device *qdev)
-{
- int i;
-
- for (i = 0; i < qdev->num_dbc; ++i)
- cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
- cleanup_srcu_struct(&qdev->dev_lock);
- pci_set_drvdata(qdev->pdev, NULL);
- destroy_workqueue(qdev->cntl_wq);
- destroy_workqueue(qdev->qts_wq);
-}
-
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
struct qaic_drm_device *qddev;
struct qaic_device *qdev;
- int i;
+ struct drm_device *drm;
+ int i, ret;
- qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
+ qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
if (!qdev)
return NULL;
qdev->dev_state = QAIC_OFFLINE;
- if (id->device == PCI_DEV_AIC100) {
+ if (id->device == PCI_DEV_AIC080 || id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
- qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+ qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
if (!qdev->dbc)
return NULL;
}
- qdev->cntl_wq = alloc_workqueue("qaic_cntl", WQ_UNBOUND, 0);
- if (!qdev->cntl_wq)
+ qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
+ if (IS_ERR(qddev))
return NULL;
- qdev->qts_wq = alloc_workqueue("qaic_ts", WQ_UNBOUND, 0);
- if (!qdev->qts_wq) {
- destroy_workqueue(qdev->cntl_wq);
+ drm = to_drm(qddev);
+ pci_set_drvdata(pdev, qdev);
+
+ ret = drmm_mutex_init(drm, &qddev->users_mutex);
+ if (ret)
+ return NULL;
+ ret = drmm_add_action_or_reset(drm, qaicm_pci_release, NULL);
+ if (ret)
+ return NULL;
+ ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
+ if (ret)
+ return NULL;
+ ret = drmm_mutex_init(drm, &qdev->bootlog_mutex);
+ if (ret)
return NULL;
- }
- pci_set_drvdata(pdev, qdev);
+ qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
+ if (IS_ERR(qdev->cntl_wq))
+ return NULL;
+ qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
+ if (IS_ERR(qdev->qts_wq))
+ return NULL;
+
+ ret = qaicm_srcu_init(drm, &qdev->dev_lock);
+ if (ret)
+ return NULL;
+
+ qdev->qddev = qddev;
qdev->pdev = pdev;
+ qddev->qdev = qdev;
- mutex_init(&qdev->cntl_mutex);
INIT_LIST_HEAD(&qdev->cntl_xfer_list);
- init_srcu_struct(&qdev->dev_lock);
+ INIT_LIST_HEAD(&qdev->bootlog);
+ INIT_LIST_HEAD(&qddev->users);
for (i = 0; i < qdev->num_dbc; ++i) {
spin_lock_init(&qdev->dbc[i].xfer_lock);
qdev->dbc[i].qdev = qdev;
qdev->dbc[i].id = i;
INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
- init_srcu_struct(&qdev->dbc[i].ch_lock);
+ ret = qaicm_srcu_init(drm, &qdev->dbc[i].ch_lock);
+ if (ret)
+ return NULL;
init_waitqueue_head(&qdev->dbc[i].dbc_release);
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
}
- qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
- if (IS_ERR(qddev)) {
- cleanup_qdev(qdev);
- return NULL;
- }
-
- drmm_mutex_init(to_drm(qddev), &qddev->users_mutex);
- INIT_LIST_HEAD(&qddev->users);
- qddev->qdev = qdev;
- qdev->qddev = qddev;
-
return qdev;
}
@@ -391,9 +447,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
return ret;
- ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- if (ret)
- return ret;
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
if (IS_ERR(qdev->bar_0))
@@ -472,35 +526,28 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = init_pci(qdev, pdev);
if (ret)
- goto cleanup_qdev;
+ return ret;
for (i = 0; i < qdev->num_dbc; ++i)
qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
mhi_irq = init_msi(qdev, pdev);
- if (mhi_irq < 0) {
- ret = mhi_irq;
- goto cleanup_qdev;
- }
+ if (mhi_irq < 0)
+ return mhi_irq;
ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
if (ret)
- goto cleanup_qdev;
+ return ret;
qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
qdev->single_msi);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
- goto cleanup_drm_dev;
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
+ return ret;
}
return 0;
-
-cleanup_drm_dev:
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
-cleanup_qdev:
- cleanup_qdev(qdev);
- return ret;
}
static void qaic_pci_remove(struct pci_dev *pdev)
@@ -511,9 +558,8 @@ static void qaic_pci_remove(struct pci_dev *pdev)
return;
qaic_dev_reset_clean_local_state(qdev);
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
- cleanup_qdev(qdev);
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
}
static void qaic_pci_shutdown(struct pci_dev *pdev)
@@ -561,6 +607,7 @@ static struct mhi_driver qaic_mhi_driver = {
};
static const struct pci_device_id qaic_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC080), },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
{ }
};
@@ -597,12 +644,24 @@ static int __init qaic_init(void)
goto free_pci;
}
+ ret = sahara_register();
+ if (ret) {
+ pr_debug("qaic: sahara_register failed %d\n", ret);
+ goto free_mhi;
+ }
+
ret = qaic_timesync_init();
if (ret)
pr_debug("qaic: qaic_timesync_init failed %d\n", ret);
+ ret = qaic_bootlog_register();
+ if (ret)
+ pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);
+
return 0;
+free_mhi:
+ mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
pci_unregister_driver(&qaic_pci_driver);
return ret;
@@ -626,7 +685,9 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_bootlog_unregister();
qaic_timesync_deinit();
+ sahara_unregister();
mhi_driver_unregister(&qaic_mhi_driver);
pci_unregister_driver(&qaic_pci_driver);
}
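
The qaic_drv.c rework above replaces cleanup_qdev() and the probe-time error unwinding with DRM-managed resources: qaicm_wq_init(), qaicm_srcu_init() and qaicm_pci_release() all hang their teardown off the drm_device via drmm_add_action_or_reset(). A minimal sketch of the same pattern for an arbitrary driver-owned allocation (my_buf_release/my_dev_init are hypothetical names, not from this patch):

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/slab.h>

static void my_buf_release(struct drm_device *drm, void *res)
{
	kfree(res);	/* invoked automatically when the drm_device goes away */
}

static int my_dev_init(struct drm_device *drm)
{
	void *buf = kzalloc(4096, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/*
	 * On success the release action fires on final drm_device teardown;
	 * on failure it fires immediately, so no manual unwind path is needed.
	 */
	return drmm_add_action_or_reset(drm, my_buf_release, buf);
}

For a plain allocation drmm_kzalloc() would do the same without an explicit action; the explicit form is what the driver needs for workqueues and SRCU state.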
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
new file mode 100644
index 000000000000..21d58aed0deb
--- /dev/null
+++ b/drivers/accel/qaic/sahara.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+#include <linux/limits.h>
+#include <linux/mhi.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include "sahara.h"
+
+#define SAHARA_HELLO_CMD 0x1 /* Min protocol version 1.0 */
+#define SAHARA_HELLO_RESP_CMD 0x2 /* Min protocol version 1.0 */
+#define SAHARA_READ_DATA_CMD 0x3 /* Min protocol version 1.0 */
+#define SAHARA_END_OF_IMAGE_CMD 0x4 /* Min protocol version 1.0 */
+#define SAHARA_DONE_CMD 0x5 /* Min protocol version 1.0 */
+#define SAHARA_DONE_RESP_CMD 0x6 /* Min protocol version 1.0 */
+#define SAHARA_RESET_CMD 0x7 /* Min protocol version 1.0 */
+#define SAHARA_RESET_RESP_CMD 0x8 /* Min protocol version 1.0 */
+#define SAHARA_MEM_DEBUG_CMD 0x9 /* Min protocol version 2.0 */
+#define SAHARA_MEM_READ_CMD 0xa /* Min protocol version 2.0 */
+#define SAHARA_CMD_READY_CMD 0xb /* Min protocol version 2.1 */
+#define SAHARA_SWITCH_MODE_CMD 0xc /* Min protocol version 2.1 */
+#define SAHARA_EXECUTE_CMD 0xd /* Min protocol version 2.1 */
+#define SAHARA_EXECUTE_RESP_CMD 0xe /* Min protocol version 2.1 */
+#define SAHARA_EXECUTE_DATA_CMD 0xf /* Min protocol version 2.1 */
+#define SAHARA_MEM_DEBUG64_CMD 0x10 /* Min protocol version 2.5 */
+#define SAHARA_MEM_READ64_CMD 0x11 /* Min protocol version 2.5 */
+#define SAHARA_READ_DATA64_CMD 0x12 /* Min protocol version 2.8 */
+#define SAHARA_RESET_STATE_CMD 0x13 /* Min protocol version 2.9 */
+#define SAHARA_WRITE_DATA_CMD 0x14 /* Min protocol version 3.0 */
+
+#define SAHARA_PACKET_MAX_SIZE 0xffffU /* MHI_MAX_MTU */
+#define SAHARA_TRANSFER_MAX_SIZE 0x80000
+#define SAHARA_READ_MAX_SIZE 0xfff0U /* Avoid unaligned requests */
+#define SAHARA_NUM_TX_BUF DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
+ SAHARA_PACKET_MAX_SIZE)
+#define SAHARA_IMAGE_ID_NONE U32_MAX
+
+#define SAHARA_VERSION 2
+#define SAHARA_SUCCESS 0
+#define SAHARA_TABLE_ENTRY_STR_LEN 20
+
+#define SAHARA_MODE_IMAGE_TX_PENDING 0x0
+#define SAHARA_MODE_IMAGE_TX_COMPLETE 0x1
+#define SAHARA_MODE_MEMORY_DEBUG 0x2
+#define SAHARA_MODE_COMMAND 0x3
+
+#define SAHARA_HELLO_LENGTH 0x30
+#define SAHARA_READ_DATA_LENGTH 0x14
+#define SAHARA_END_OF_IMAGE_LENGTH 0x10
+#define SAHARA_DONE_LENGTH 0x8
+#define SAHARA_RESET_LENGTH 0x8
+#define SAHARA_MEM_DEBUG64_LENGTH 0x18
+#define SAHARA_MEM_READ64_LENGTH 0x18
+
+struct sahara_packet {
+ __le32 cmd;
+ __le32 length;
+
+ union {
+ struct {
+ __le32 version;
+ __le32 version_compat;
+ __le32 max_length;
+ __le32 mode;
+ } hello;
+ struct {
+ __le32 version;
+ __le32 version_compat;
+ __le32 status;
+ __le32 mode;
+ } hello_resp;
+ struct {
+ __le32 image;
+ __le32 offset;
+ __le32 length;
+ } read_data;
+ struct {
+ __le32 image;
+ __le32 status;
+ } end_of_image;
+ struct {
+ __le64 table_address;
+ __le64 table_length;
+ } memory_debug64;
+ struct {
+ __le64 memory_address;
+ __le64 memory_length;
+ } memory_read64;
+ };
+};
+
+struct sahara_debug_table_entry64 {
+ __le64 type;
+ __le64 address;
+ __le64 length;
+ char description[SAHARA_TABLE_ENTRY_STR_LEN];
+ char filename[SAHARA_TABLE_ENTRY_STR_LEN];
+};
+
+struct sahara_dump_table_entry {
+ u64 type;
+ u64 address;
+ u64 length;
+ char description[SAHARA_TABLE_ENTRY_STR_LEN];
+ char filename[SAHARA_TABLE_ENTRY_STR_LEN];
+};
+
+#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdef
+#define SAHARA_DUMP_V1_VER 1
+struct sahara_memory_dump_meta_v1 {
+ u64 magic;
+ u64 version;
+ u64 dump_size;
+ u64 table_size;
+};
+
+/*
+ * Layout of crashdump provided to user via devcoredump
+ * +------------------------------------------+
+ * |         Crashdump Meta structure         |
+ * | type: struct sahara_memory_dump_meta_v1  |
+ * +------------------------------------------+
+ * |             Crashdump Table              |
+ * | type: array of struct                    |
+ * |       sahara_dump_table_entry            |
+ * |                                          |
+ * |                                          |
+ * +------------------------------------------+
+ * |                Crashdump                 |
+ * |                                          |
+ * |                                          |
+ * |                                          |
+ * |                                          |
+ * |                                          |
+ * +------------------------------------------+
+ *
+ * First is the metadata header. Userspace can use the magic number to verify
+ * the content type, and then check the version for the rest of the format.
+ * New versions should keep the magic number location/value, and version
+ * location, but increment the version value.
+ *
+ * For v1, the metadata lists the size of the entire dump (header + table +
+ * dump) and the size of the table. Then the dump image table, which describes
+ * the contents of the dump. Finally all the images are listed in order, with
+ * no deadspace in between. Userspace can use the sizes listed in the image
+ * table to reconstruct the individual images.
+ */
+
+struct sahara_context {
+ struct sahara_packet *tx[SAHARA_NUM_TX_BUF];
+ struct sahara_packet *rx;
+ struct work_struct fw_work;
+ struct work_struct dump_work;
+ struct mhi_device *mhi_dev;
+ const char **image_table;
+ u32 table_size;
+ u32 active_image_id;
+ const struct firmware *firmware;
+ u64 dump_table_address;
+ u64 dump_table_length;
+ size_t rx_size;
+ size_t rx_size_requested;
+ void *mem_dump;
+ size_t mem_dump_sz;
+ struct sahara_dump_table_entry *dump_image;
+ u64 dump_image_offset;
+ void *mem_dump_freespace;
+ u64 dump_images_left;
+ bool is_mem_dump_mode;
+};
+
+static const char *aic100_image_table[] = {
+ [1] = "qcom/aic100/fw1.bin",
+ [2] = "qcom/aic100/fw2.bin",
+ [4] = "qcom/aic100/fw4.bin",
+ [5] = "qcom/aic100/fw5.bin",
+ [6] = "qcom/aic100/fw6.bin",
+ [8] = "qcom/aic100/fw8.bin",
+ [9] = "qcom/aic100/fw9.bin",
+ [10] = "qcom/aic100/fw10.bin",
+};
+
+static int sahara_find_image(struct sahara_context *context, u32 image_id)
+{
+ int ret;
+
+ if (image_id == context->active_image_id)
+ return 0;
+
+ if (context->active_image_id != SAHARA_IMAGE_ID_NONE) {
+ dev_err(&context->mhi_dev->dev, "image id %d is not valid as %d is active\n",
+ image_id, context->active_image_id);
+ return -EINVAL;
+ }
+
+ if (image_id >= context->table_size || !context->image_table[image_id]) {
+ dev_err(&context->mhi_dev->dev, "request for unknown image: %d\n", image_id);
+ return -EINVAL;
+ }
+
+ /*
+ * This image might be optional. The device may continue without it.
+ * Only the device knows. Suppress error messages that could suggest a
+ * problem when we were actually able to continue.
+ */
+ ret = firmware_request_nowarn(&context->firmware,
+ context->image_table[image_id],
+ &context->mhi_dev->dev);
+ if (ret) {
+ dev_dbg(&context->mhi_dev->dev, "request for image id %d / file %s failed %d\n",
+ image_id, context->image_table[image_id], ret);
+ return ret;
+ }
+
+ context->active_image_id = image_id;
+
+ return 0;
+}
+
+static void sahara_release_image(struct sahara_context *context)
+{
+ if (context->active_image_id != SAHARA_IMAGE_ID_NONE)
+ release_firmware(context->firmware);
+ context->active_image_id = SAHARA_IMAGE_ID_NONE;
+}
+
+static void sahara_send_reset(struct sahara_context *context)
+{
+ int ret;
+
+ context->is_mem_dump_mode = false;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_RESET_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send reset response %d\n", ret);
+}
+
+static void sahara_hello(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "HELLO cmd received. length:%d version:%d version_compat:%d max_length:%d mode:%d\n",
+ le32_to_cpu(context->rx->length),
+ le32_to_cpu(context->rx->hello.version),
+ le32_to_cpu(context->rx->hello.version_compat),
+ le32_to_cpu(context->rx->hello.max_length),
+ le32_to_cpu(context->rx->hello.mode));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_HELLO_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed hello packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+ if (le32_to_cpu(context->rx->hello.version) != SAHARA_VERSION) {
+ dev_err(&context->mhi_dev->dev, "Unsupported hello packet - version %d\n",
+ le32_to_cpu(context->rx->hello.version));
+ return;
+ }
+
+ if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE &&
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_MEMORY_DEBUG) {
+ dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
+ le32_to_cpu(context->rx->hello.mode));
+ return;
+ }
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_HELLO_RESP_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_HELLO_LENGTH);
+ context->tx[0]->hello_resp.version = cpu_to_le32(SAHARA_VERSION);
+ context->tx[0]->hello_resp.version_compat = cpu_to_le32(SAHARA_VERSION);
+ context->tx[0]->hello_resp.status = cpu_to_le32(SAHARA_SUCCESS);
+ context->tx[0]->hello_resp.mode = context->rx->hello_resp.mode;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_HELLO_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
+}
+
+static void sahara_read_data(struct sahara_context *context)
+{
+ u32 image_id, data_offset, data_len, pkt_data_len;
+ int ret;
+ int i;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "READ_DATA cmd received. length:%d image:%d offset:%d data_length:%d\n",
+ le32_to_cpu(context->rx->length),
+ le32_to_cpu(context->rx->read_data.image),
+ le32_to_cpu(context->rx->read_data.offset),
+ le32_to_cpu(context->rx->read_data.length));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_READ_DATA_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ image_id = le32_to_cpu(context->rx->read_data.image);
+ data_offset = le32_to_cpu(context->rx->read_data.offset);
+ data_len = le32_to_cpu(context->rx->read_data.length);
+
+ ret = sahara_find_image(context, image_id);
+ if (ret) {
+ sahara_send_reset(context);
+ return;
+ }
+
+ /*
+ * Image is released when the device is done with it via
+ * SAHARA_END_OF_IMAGE_CMD. sahara_send_reset() will either cause the
+ * device to retry the operation with a modification, or decide to be
+ * done with the image and trigger SAHARA_END_OF_IMAGE_CMD.
+ * release_image() is called from SAHARA_END_OF_IMAGE_CMD processing
+ * and is not needed here on error.
+ */
+
+ if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
+ data_len, SAHARA_TRANSFER_MAX_SIZE);
+ sahara_send_reset(context);
+ return;
+ }
+
+ if (data_offset >= context->firmware->size) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d exceeds file size %zu\n",
+ data_offset, context->firmware->size);
+ sahara_send_reset(context);
+ return;
+ }
+
+ if (size_add(data_offset, data_len) > context->firmware->size) {
+ dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d and length %d exceeds file size %zu\n",
+ data_offset, data_len, context->firmware->size);
+ sahara_send_reset(context);
+ return;
+ }
+
+ for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
+ pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);
+
+ memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);
+
+ data_offset += pkt_data_len;
+ data_len -= pkt_data_len;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
+ context->tx[i], pkt_data_len,
+ !data_len ? MHI_EOT : MHI_CHAIN);
+ if (ret) {
+ dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
+ ret);
+ return;
+ }
+ }
+}
+
+static void sahara_end_of_image(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "END_OF_IMAGE cmd received. length:%d image:%d status:%d\n",
+ le32_to_cpu(context->rx->length),
+ le32_to_cpu(context->rx->end_of_image.image),
+ le32_to_cpu(context->rx->end_of_image.status));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_END_OF_IMAGE_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ if (context->active_image_id != SAHARA_IMAGE_ID_NONE &&
+ le32_to_cpu(context->rx->end_of_image.image) != context->active_image_id) {
+ dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - image %d is not the active image\n",
+ le32_to_cpu(context->rx->end_of_image.image));
+ return;
+ }
+
+ sahara_release_image(context);
+
+ if (le32_to_cpu(context->rx->end_of_image.status))
+ return;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_DONE_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_DONE_LENGTH);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_DONE_LENGTH, MHI_EOT);
+ if (ret)
+ dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
+}
+
+static void sahara_memory_debug64(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "MEMORY DEBUG64 cmd received. length:%d table_address:%#llx table_length:%#llx\n",
+ le32_to_cpu(context->rx->length),
+ le64_to_cpu(context->rx->memory_debug64.table_address),
+ le64_to_cpu(context->rx->memory_debug64.table_length));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_MEM_DEBUG64_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ context->dump_table_address = le64_to_cpu(context->rx->memory_debug64.table_address);
+ context->dump_table_length = le64_to_cpu(context->rx->memory_debug64.table_length);
+
+ if (context->dump_table_length % sizeof(struct sahara_debug_table_entry64) != 0 ||
+ !context->dump_table_length) {
+ dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - table length %lld\n",
+ context->dump_table_length);
+ return;
+ }
+
+ /*
+ * From this point, the protocol flips. We make memory_read requests to
+ * the device, and the device responds with the raw data. If the device
+ * has an error, it will send an End of Image command. First we need to
+ * request the memory dump table so that we know where all the pieces
+ * of the dump are that we can consume.
+ */
+
+ context->is_mem_dump_mode = true;
+
+ /*
+ * Assume that the table is smaller than our MTU so that we can read it
+ * in one shot. The spec does not put an upper limit on the table, but
+ * no known device will exceed this.
+ */
+ if (context->dump_table_length > SAHARA_PACKET_MAX_SIZE) {
+ dev_err(&context->mhi_dev->dev, "Memory dump table length %lld exceeds supported size. Discarding dump\n",
+ context->dump_table_length);
+ sahara_send_reset(context);
+ return;
+ }
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_table_address);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(context->dump_table_length);
+
+ context->rx_size_requested = context->dump_table_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send read for dump table %d\n", ret);
+}
+
+static void sahara_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, fw_work);
+ int ret;
+
+ switch (le32_to_cpu(context->rx->cmd)) {
+ case SAHARA_HELLO_CMD:
+ sahara_hello(context);
+ break;
+ case SAHARA_READ_DATA_CMD:
+ sahara_read_data(context);
+ break;
+ case SAHARA_END_OF_IMAGE_CMD:
+ sahara_end_of_image(context);
+ break;
+ case SAHARA_DONE_RESP_CMD:
+ /* Intentional do nothing as we don't need to exit an app */
+ break;
+ case SAHARA_RESET_RESP_CMD:
+ /* Intentional do nothing as we don't need to exit an app */
+ break;
+ case SAHARA_MEM_DEBUG64_CMD:
+ sahara_memory_debug64(context);
+ break;
+ default:
+ dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
+ le32_to_cpu(context->rx->cmd));
+ break;
+ }
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
+ SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
+}
+
+static void sahara_parse_dump_table(struct sahara_context *context)
+{
+ struct sahara_dump_table_entry *image_out_table;
+ struct sahara_debug_table_entry64 *dev_table;
+ struct sahara_memory_dump_meta_v1 *dump_meta;
+ u64 table_nents;
+ u64 dump_length;
+ int ret;
+ u64 i;
+
+ table_nents = context->dump_table_length / sizeof(*dev_table);
+ context->dump_images_left = table_nents;
+ dump_length = 0;
+
+ dev_table = (struct sahara_debug_table_entry64 *)(context->rx);
+ for (i = 0; i < table_nents; ++i) {
+ /* Do not trust the device, ensure the strings are terminated */
+ dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
+ dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
+
+ dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length));
+ if (dump_length == SIZE_MAX) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ dev_dbg(&context->mhi_dev->dev,
+ "Memory dump table entry %lld type: %lld address: %#llx length: %#llx description: \"%s\" filename \"%s\"\n",
+ i,
+ le64_to_cpu(dev_table[i].type),
+ le64_to_cpu(dev_table[i].address),
+ le64_to_cpu(dev_table[i].length),
+ dev_table[i].description,
+ dev_table[i].filename);
+ }
+
+ dump_length = size_add(dump_length, sizeof(*dump_meta));
+ if (dump_length == SIZE_MAX) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+ dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents));
+ if (dump_length == SIZE_MAX) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ context->mem_dump_sz = dump_length;
+ context->mem_dump = vzalloc(dump_length);
+ if (!context->mem_dump) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ /* Populate the dump metadata and table for userspace */
+ dump_meta = context->mem_dump;
+ dump_meta->magic = SAHARA_DUMP_V1_MAGIC;
+ dump_meta->version = SAHARA_DUMP_V1_VER;
+ dump_meta->dump_size = dump_length;
+ dump_meta->table_size = context->dump_table_length;
+
+ image_out_table = context->mem_dump + sizeof(*dump_meta);
+ for (i = 0; i < table_nents; ++i) {
+ image_out_table[i].type = le64_to_cpu(dev_table[i].type);
+ image_out_table[i].address = le64_to_cpu(dev_table[i].address);
+ image_out_table[i].length = le64_to_cpu(dev_table[i].length);
+ strscpy(image_out_table[i].description, dev_table[i].description,
+ SAHARA_TABLE_ENTRY_STR_LEN);
+ strscpy(image_out_table[i].filename,
+ dev_table[i].filename,
+ SAHARA_TABLE_ENTRY_STR_LEN);
+ }
+
+ context->mem_dump_freespace = &image_out_table[i];
+
+ /* Done parsing the table, switch to image dump mode */
+ context->dump_table_length = 0;
+
+ /* Request the first chunk of the first image */
+ context->dump_image = &image_out_table[0];
+ dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE);
+ /* Avoid requesting EOI sized data so that we can identify errors */
+ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
+ dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
+
+ context->dump_image_offset = dump_length;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_image->address);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
+
+ context->rx_size_requested = dump_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send read for dump content %d\n", ret);
+}
+
+static void sahara_parse_dump_image(struct sahara_context *context)
+{
+ u64 dump_length;
+ int ret;
+
+ memcpy(context->mem_dump_freespace, context->rx, context->rx_size);
+ context->mem_dump_freespace += context->rx_size;
+
+ if (context->dump_image_offset >= context->dump_image->length) {
+ /* Need to move to next image */
+ context->dump_image++;
+ context->dump_images_left--;
+ context->dump_image_offset = 0;
+
+ if (!context->dump_images_left) {
+ /* Dump done */
+ dev_coredumpv(context->mhi_dev->mhi_cntrl->cntrl_dev,
+ context->mem_dump,
+ context->mem_dump_sz,
+ GFP_KERNEL);
+ context->mem_dump = NULL;
+ sahara_send_reset(context);
+ return;
+ }
+ }
+
+ /* Get next image chunk */
+ dump_length = context->dump_image->length - context->dump_image_offset;
+ dump_length = min(dump_length, SAHARA_READ_MAX_SIZE);
+ /* Avoid requesting EOI sized data so that we can identify errors */
+ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
+ dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address =
+ cpu_to_le64(context->dump_image->address + context->dump_image_offset);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
+
+ context->dump_image_offset += dump_length;
+ context->rx_size_requested = dump_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev,
+ "Unable to send read for dump content %d\n", ret);
+}
+
+static void sahara_dump_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, dump_work);
+ int ret;
+
+ /*
+ * We should get the expected raw data, but if the device has an error
+ * it is supposed to send EOI with an error code.
+ */
+ if (context->rx_size != context->rx_size_requested &&
+ context->rx_size != SAHARA_END_OF_IMAGE_LENGTH) {
+ dev_err(&context->mhi_dev->dev,
+ "Unexpected response to read_data. Expected size: %#zx got: %#zx\n",
+ context->rx_size_requested,
+ context->rx_size);
+ goto error;
+ }
+
+ if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
+ le32_to_cpu(context->rx->cmd) == SAHARA_END_OF_IMAGE_CMD) {
+ dev_err(&context->mhi_dev->dev,
+ "Unexpected EOI response to read_data. Status: %d\n",
+ le32_to_cpu(context->rx->end_of_image.status));
+ goto error;
+ }
+
+ if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
+ le32_to_cpu(context->rx->cmd) != SAHARA_END_OF_IMAGE_CMD) {
+ dev_err(&context->mhi_dev->dev,
+ "Invalid EOI response to read_data. CMD: %d\n",
+ le32_to_cpu(context->rx->cmd));
+ goto error;
+ }
+
+ /*
+ * Need to know if we received the dump table, or part of a dump image.
+ * Since we get raw data, we cannot tell from the data itself. Instead,
+ * we use the stored dump_table_length, which we zero after we read and
+ * process the entire table.
+ */
+ if (context->dump_table_length)
+ sahara_parse_dump_table(context);
+ else
+ sahara_parse_dump_image(context);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
+ SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
+
+ return;
+
+error:
+ vfree(context->mem_dump);
+ context->mem_dump = NULL;
+ sahara_send_reset(context);
+}
+
+static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct sahara_context *context;
+ int ret;
+ int i;
+
+ context = devm_kzalloc(&mhi_dev->dev, sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ context->rx = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ if (!context->rx)
+ return -ENOMEM;
+
+ /*
+ * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
+ * will request for READ_DATA. This is larger than
+ * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
+ * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
+ * READ_DATA, it requires a transfer of the exact size requested. We
+ * can use MHI_CHAIN to link multiple buffers into a single transfer
+ * but the remote side will not consume the buffers until it sees an
+ * EOT, thus we need to allocate enough buffers to put in the tx fifo
+ * to cover an entire READ_DATA request of the max size.
+ */
+ for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
+ context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ if (!context->tx[i])
+ return -ENOMEM;
+ }
+
+ context->mhi_dev = mhi_dev;
+ INIT_WORK(&context->fw_work, sahara_processing);
+ INIT_WORK(&context->dump_work, sahara_dump_processing);
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ context->active_image_id = SAHARA_IMAGE_ID_NONE;
+ dev_set_drvdata(&mhi_dev->dev, context);
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, context->rx, SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sahara_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ cancel_work_sync(&context->fw_work);
+ cancel_work_sync(&context->dump_work);
+ vfree(context->mem_dump);
+ sahara_release_image(context);
+ mhi_unprepare_from_transfer(mhi_dev);
+}
+
+static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+}
+
+static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ if (!mhi_result->transaction_status) {
+ context->rx_size = mhi_result->bytes_xferd;
+ if (context->is_mem_dump_mode)
+ schedule_work(&context->dump_work);
+ else
+ schedule_work(&context->fw_work);
+ }
+
+}
+
+static const struct mhi_device_id sahara_mhi_match_table[] = {
+ { .chan = "QAIC_SAHARA", },
+ {},
+};
+
+static struct mhi_driver sahara_mhi_driver = {
+ .id_table = sahara_mhi_match_table,
+ .remove = sahara_mhi_remove,
+ .probe = sahara_mhi_probe,
+ .ul_xfer_cb = sahara_mhi_ul_xfer_cb,
+ .dl_xfer_cb = sahara_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "sahara",
+ },
+};
+
+int sahara_register(void)
+{
+ return mhi_driver_register(&sahara_mhi_driver);
+}
+
+void sahara_unregister(void)
+{
+ mhi_driver_unregister(&sahara_mhi_driver);
+}
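
For reference, the TX buffer pool sizing explained in the sahara_mhi_probe() comment works out as follows, using the values from the defines at the top of this file:

/*
 * SAHARA_PACKET_MAX_SIZE   = 0xffffU  = 65535 bytes (MHI MTU)
 * SAHARA_TRANSFER_MAX_SIZE = 0x80000  = 524288 bytes (largest READ_DATA)
 * SAHARA_NUM_TX_BUF = DIV_ROUND_UP(524288, 65535)
 *                   = ceil(8.0001...) = 9 chained buffers per READ_DATA
 */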
diff --git a/drivers/accel/qaic/sahara.h b/drivers/accel/qaic/sahara.h
new file mode 100644
index 000000000000..640208acc0d1
--- /dev/null
+++ b/drivers/accel/qaic/sahara.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#ifndef __SAHARA_H__
+#define __SAHARA_H__
+
+int sahara_register(void);
+void sahara_unregister(void);
+#endif /* __SAHARA_H__ */
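
The devcoredump blob produced by sahara.c follows the v1 layout documented in that file: meta header, dump table, then the images back to back with no padding. A minimal userspace parsing sketch, assuming the blob has already been read into memory; the structs are re-declared locally (the kernel definitions are not exported) and mirror the field widths used in sahara.c, with u64 fields written in the host's native byte order by the driver:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SAHARA_DUMP_V1_MAGIC		0x1234567890abcdefULL
#define SAHARA_DUMP_V1_VER		1
#define SAHARA_TABLE_ENTRY_STR_LEN	20

/* Local copies of the v1 layout structs from sahara.c. */
struct dump_meta_v1 {
	uint64_t magic;
	uint64_t version;
	uint64_t dump_size;
	uint64_t table_size;
};

struct dump_table_entry {
	uint64_t type;
	uint64_t address;
	uint64_t length;
	char description[SAHARA_TABLE_ENTRY_STR_LEN];
	char filename[SAHARA_TABLE_ENTRY_STR_LEN];
};

static int parse_dump(const void *blob, size_t size)
{
	const struct dump_meta_v1 *meta = blob;
	const struct dump_table_entry *table;
	const unsigned char *cursor;
	uint64_t i, nents;

	if (size < sizeof(*meta) || meta->magic != SAHARA_DUMP_V1_MAGIC ||
	    meta->version != SAHARA_DUMP_V1_VER || meta->dump_size > size)
		return -1;

	table = (const void *)(meta + 1);
	/* v1 table entries are 64 bytes, so table_size / entry size is the count. */
	nents = meta->table_size / sizeof(*table);
	cursor = (const unsigned char *)&table[nents];	/* images follow the table */

	for (i = 0; i < nents; i++) {
		printf("%-20s %llu bytes at blob offset %zu\n",
		       table[i].filename,
		       (unsigned long long)table[i].length,
		       (size_t)(cursor - (const unsigned char *)blob));
		cursor += table[i].length;	/* images are packed back to back */
	}
	return 0;
}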