1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
#include <linux/netdevice.h>
#include "hinic3_hw_comm.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_nic_dev.h"
#include "hinic3_rx.h"
#include "hinic3_tx.h"
/* NAPI poll handler shared by the TX and RX queues of one queue pair.
 * Reaps TX completions, then processes up to @budget RX completions.
 *
 * Return: number of RX packets processed, or @budget if there is still
 * work pending so the NAPI core keeps the context scheduled.
 */
static int hinic3_poll(struct napi_struct *napi, int budget)
{
	struct hinic3_irq_cfg *irq_cfg =
		container_of(napi, struct hinic3_irq_cfg, napi);
	struct hinic3_nic_dev *nic_dev;
	bool busy = false;
	int work_done;

	nic_dev = netdev_priv(irq_cfg->netdev);

	/* TX completions are reaped even when budget is 0; hinic3_tx_poll()
	 * reports via its return value whether the TX side is still busy.
	 */
	busy |= hinic3_tx_poll(irq_cfg->txq, budget);
	/* Zero budget (e.g. netpoll context) means RX must not be touched. */
	if (unlikely(!budget))
		return 0;

	work_done = hinic3_rx_poll(irq_cfg->rxq, budget);
	busy |= work_done >= budget;
	if (busy)
		return budget;

	/* Only re-enable the MSI-X vector when NAPI actually completed,
	 * i.e. no concurrent reschedule raced with us.
	 */
	if (likely(napi_complete_done(napi, work_done)))
		hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
				      HINIC3_MSIX_ENABLE);

	return work_done;
}
/* Register a queue pair's NAPI context and link it to its RX/TX queues.
 *
 * The NAPI instance must be registered with the core via netif_napi_add()
 * before netif_queue_set_napi() may associate queues with it; the original
 * code performed the association first, on a not-yet-added NAPI context.
 */
static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);

	netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
	/* NOTE(review): irq_id is passed as the queue index here — confirm
	 * the MSI-X vector numbers really map 1:1 onto queue ids.
	 */
	netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
			     NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
	netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
			     NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
	napi_enable(&irq_cfg->napi);
}
/* Detach a queue pair's NAPI context: disable polling, unlink the NAPI
 * instance from its RX/TX queues, stop the TX subqueue, and finally
 * unregister the NAPI context. Order mirrors (and reverses) qp_add_napi().
 */
static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
{
	napi_disable(&irq_cfg->napi);
	/* NOTE(review): irq_id is used as the queue index in both calls
	 * below — confirm vectors map 1:1 onto queue ids.
	 */
	netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
			     NETDEV_QUEUE_TYPE_RX, NULL);
	netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
			     NETDEV_QUEUE_TYPE_TX, NULL);
	netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
	netif_napi_del(&irq_cfg->napi);
}
/* Hard-IRQ handler for one queue pair's MSI-X vector.
 *
 * Clears the vector's resend bit in hardware and hands the rest of the
 * work to NAPI. Always reports the interrupt as handled.
 */
static irqreturn_t qp_irq(int irq, void *data)
{
	struct hinic3_irq_cfg *irq_cfg = data;
	struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);

	hinic3_msix_intr_clear_resend_bit(nic_dev->hwdev,
					  irq_cfg->msix_entry_idx, 1);
	napi_schedule(&irq_cfg->napi);

	return IRQ_HANDLED;
}
/* Set up one queue pair's interrupt: attach NAPI, program the vector's
 * coalescing parameters, request the irq, and apply the CPU affinity hint.
 *
 * @irq_cfg: per-qp interrupt configuration (irq_id/msix_entry_idx,
 *           irq_name and affinity_mask already filled in by the caller)
 * @q_id:    queue pair index, used to look up per-queue coalescing values
 *
 * Return: 0 on success, negative error code otherwise. On failure the
 * NAPI context added here is removed again (single goto cleanup path,
 * replacing the duplicated inline cleanup of the original).
 */
static int hinic3_request_irq(struct hinic3_irq_cfg *irq_cfg, u16 q_id)
{
	struct hinic3_interrupt_info info = {};
	struct hinic3_nic_dev *nic_dev;
	struct net_device *netdev;
	int err;

	netdev = irq_cfg->netdev;
	nic_dev = netdev_priv(netdev);
	qp_add_napi(irq_cfg);

	info.msix_index = irq_cfg->msix_entry_idx;
	info.interrupt_coalesc_set = 1;
	info.pending_limit = nic_dev->intr_coalesce[q_id].pending_limit;
	info.coalesc_timer_cfg =
		nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
	info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
	err = hinic3_set_interrupt_cfg_direct(nic_dev->hwdev, &info);
	if (err) {
		netdev_err(netdev, "Failed to set RX interrupt coalescing attribute.\n");
		goto err_del_napi;
	}

	err = request_irq(irq_cfg->irq_id, qp_irq, 0, irq_cfg->irq_name,
			  irq_cfg);
	if (err)
		goto err_del_napi;

	irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);

	return 0;

err_del_napi:
	qp_del_napi(irq_cfg);
	return err;
}
/* Release a queue pair's irq: clear the affinity hint first so no stale
 * hint outlives the handler, then free the irq line.
 */
static void hinic3_release_irq(struct hinic3_irq_cfg *irq_cfg)
{
	irq_set_affinity_hint(irq_cfg->irq_id, NULL);
	free_irq(irq_cfg->irq_id, irq_cfg);
}
/* Initialize per-queue-pair interrupts for all queue pairs of @netdev:
 * bind each qp's MSI-X vector to its TX/RX queues and NAPI context, pick
 * a NUMA-local CPU for the affinity hint, request the irq, and enable
 * the vector with hardware auto-masking.
 *
 * On failure, every queue pair initialized so far is torn down again in
 * reverse order.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hinic3_qps_irq_init(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct pci_dev *pdev = nic_dev->pdev;
	struct hinic3_irq_cfg *irq_cfg;
	struct msix_entry *msix_entry;
	u32 local_cpu;
	u16 q_id;
	int err;

	for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
		msix_entry = &nic_dev->qps_msix_entries[q_id];
		irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
		irq_cfg->irq_id = msix_entry->vector;
		irq_cfg->msix_entry_idx = msix_entry->entry;
		irq_cfg->netdev = netdev;
		irq_cfg->txq = &nic_dev->txqs[q_id];
		irq_cfg->rxq = &nic_dev->rxqs[q_id];
		nic_dev->rxqs[q_id].irq_cfg = irq_cfg;

		/* Spread irqs over CPUs close to the device's NUMA node. */
		local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev));
		cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);
		snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
			 "%s_qp%u", netdev->name, q_id);

		err = hinic3_request_irq(irq_cfg, q_id);
		if (err) {
			netdev_err(netdev, "Failed to request Rx irq\n");
			goto err_release_irqs;
		}

		hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
						irq_cfg->msix_entry_idx,
						HINIC3_SET_MSIX_AUTO_MASK);
		hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
				      HINIC3_MSIX_ENABLE);
	}

	return 0;

err_release_irqs:
	/* Unwind in reverse: entries [0, q_id) were fully initialized. */
	while (q_id > 0) {
		q_id--;
		irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
		qp_del_napi(irq_cfg);
		hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
				      HINIC3_MSIX_DISABLE);
		hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
						irq_cfg->msix_entry_idx,
						HINIC3_CLR_MSIX_AUTO_MASK);
		hinic3_release_irq(irq_cfg);
	}

	return err;
}
/* Tear down per-queue-pair interrupts for all queue pairs of @netdev.
 * Mirrors the error-unwind path of hinic3_qps_irq_init(): remove NAPI,
 * disable the MSI-X vector and its auto-masking, then free the irq.
 */
void hinic3_qps_irq_uninit(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_irq_cfg *irq_cfg;
	u16 q_id;

	for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
		irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
		qp_del_napi(irq_cfg);
		hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
				      HINIC3_MSIX_DISABLE);
		hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
						irq_cfg->msix_entry_idx,
						HINIC3_CLR_MSIX_AUTO_MASK);
		hinic3_release_irq(irq_cfg);
	}
}
|