// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

#include <linux/dma-mapping.h>

#include "hinic3_hwdev.h"
#include "hinic3_wq.h"

#define WQ_MIN_DEPTH		64
#define WQ_MAX_DEPTH		65536
#define WQ_PAGE_ADDR_SIZE	sizeof(u64)
#define WQ_MAX_NUM_PAGES	(HINIC3_MIN_PAGE_SIZE / WQ_PAGE_ADDR_SIZE)

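/*
 * Set up the WQ block. For a single-page WQ (0-level CLA) the first queue
 * page itself serves as the WQ block; otherwise one DMA-coherent page is
 * allocated and filled with the big-endian DMA addresses of all queue
 * pages, acting as the indirection table for the queue.
 */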
static int wq_init_wq_block(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
{
        struct hinic3_queue_pages *qpages = &wq->qpages;
        int i;

        if (hinic3_wq_is_0_level_cla(wq)) {
                wq->wq_block_paddr = qpages->pages[0].align_paddr;
                wq->wq_block_vaddr = qpages->pages[0].align_vaddr;
                return 0;
        }

        if (wq->qpages.num_pages > WQ_MAX_NUM_PAGES) {
                dev_err(hwdev->dev, "wq num_pages exceeds limit: %lu\n",
                        WQ_MAX_NUM_PAGES);
                return -EFAULT;
        }

        wq->wq_block_vaddr = dma_alloc_coherent(hwdev->dev,
                                                HINIC3_MIN_PAGE_SIZE,
                                                &wq->wq_block_paddr,
                                                GFP_KERNEL);
        if (!wq->wq_block_vaddr)
                return -ENOMEM;

        for (i = 0; i < qpages->num_pages; i++)
                wq->wq_block_vaddr[i] = cpu_to_be64(qpages->pages[i].align_paddr);

        return 0;
}

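/* Allocate the queue pages and the WQ block that describes them. */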
static int wq_alloc_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
{
        int err;

        err = hinic3_queue_pages_alloc(hwdev, &wq->qpages, 0);
        if (err)
                return err;

        err = wq_init_wq_block(hwdev, wq);
        if (err) {
                hinic3_queue_pages_free(hwdev, &wq->qpages);
                return err;
        }

        return 0;
}

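/* Release the WQ block (if one was allocated) and the queue pages. */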
static void wq_free_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
{
        if (!hinic3_wq_is_0_level_cla(wq))
                dma_free_coherent(hwdev->dev,
                                  HINIC3_MIN_PAGE_SIZE,
                                  wq->wq_block_vaddr,
                                  wq->wq_block_paddr);

        hinic3_queue_pages_free(hwdev, &wq->qpages);
}

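/*
 * Create a work queue: validate that q_depth is a power of two within
 * [WQ_MIN_DEPTH, WQ_MAX_DEPTH] and that the WQEBB size is a power of two,
 * then initialize the queue page layout and allocate the backing pages.
 */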
int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
                     u32 q_depth, u16 wqebb_size)
{
        u32 wq_page_size;

        if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH ||
            !is_power_of_2(q_depth) || !is_power_of_2(wqebb_size)) {
                dev_err(hwdev->dev, "Invalid WQ: q_depth %u, wqebb_size %u\n",
                        q_depth, wqebb_size);
                return -EINVAL;
        }

        wq_page_size = ALIGN(hwdev->wq_page_size, HINIC3_MIN_PAGE_SIZE);

        memset(wq, 0, sizeof(*wq));
        wq->q_depth = q_depth;
        wq->idx_mask = q_depth - 1;

        hinic3_queue_pages_init(&wq->qpages, q_depth, wq_page_size, wqebb_size);

        return wq_alloc_pages(hwdev, wq);
}

void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
{
        wq_free_pages(hwdev, wq);
}

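/* Reset the producer/consumer indices and zero all queue pages. */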
void hinic3_wq_reset(struct hinic3_wq *wq)
{
        struct hinic3_queue_pages *qpages = &wq->qpages;
        u16 pg_idx;

        wq->cons_idx = 0;
        wq->prod_idx = 0;

        for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++)
                memset(qpages->pages[pg_idx].align_vaddr, 0, qpages->page_size);
}

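/*
 * Reserve num_wqebbs entries at the current producer index and advance the
 * producer index. Because queue memory is split across pages, the reserved
 * entries may not be virtually contiguous: the first part and its length
 * are always returned, and second_part_wqebbs points at the remaining
 * entries, or is NULL when no split was needed.
 */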
void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
                                u16 num_wqebbs, u16 *prod_idx,
                                struct hinic3_sq_bufdesc **first_part_wqebbs,
                                struct hinic3_sq_bufdesc **second_part_wqebbs,
                                u16 *first_part_wqebbs_num)
{
        u32 idx, remaining;

        idx = wq->prod_idx & wq->idx_mask;
        wq->prod_idx += num_wqebbs;
        *prod_idx = idx;
        *first_part_wqebbs = get_q_element(&wq->qpages, idx, &remaining);
        if (likely(remaining >= num_wqebbs)) {
                *first_part_wqebbs_num = num_wqebbs;
                *second_part_wqebbs = NULL;
        } else {
                *first_part_wqebbs_num = remaining;
                idx += remaining;
                *second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL);
        }
}

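/* A WQ that fits in a single page needs no WQ block indirection (0-level CLA). */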
bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq)
{
        return wq->qpages.num_pages == 1;
}