// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "octep_vf_config.h"
#include "octep_vf_main.h"
#include "octep_vf_regs_cnxk.h"

/* Reset all hardware Tx/Rx queues */
static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct)
{
}
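
/*
 * Illustrative sketch only: one way a CNXK VF could quiesce its rings is to
 * clear the per-ring input/output enable registers for every active ring.
 * The octep_vf_write_csr64() helper and the CNXK_VF_SDP_R_IN_ENABLE()/
 * CNXK_VF_SDP_R_OUT_ENABLE() macros are assumptions, named by analogy with
 * octep_vf_read_csr64() and CNXK_VF_SDP_R_IN_CONTROL() used elsewhere in
 * this file, so the sketch is kept compiled out.
 */
#if 0
static void octep_vf_reset_io_queues_cnxk_sketch(struct octep_vf_device *oct)
{
	int q;

	for (q = 0; q < oct->conf->ring_cfg.active_io_rings; q++) {
		/* Clearing the enable registers stops both ring directions */
		octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q), 0ULL);
		octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q), 0ULL);
	}
}
#endif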

/* Initialize configuration limits and initial active config */
static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct)
{
	struct octep_vf_config *conf = oct->conf;
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0));
	conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) &
				      CNXK_VF_R_IN_CTL_RPVF_MASK;
	conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;

	conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
	conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
	conf->iq.db_min = OCTEP_VF_DB_MIN;
	conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;

	conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
	conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
	conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
	conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
	conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
	conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;

	conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
}

/* Setup registers for a hardware Tx Queue */
static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
{
}
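
/*
 * Illustrative sketch only: Tx queue setup typically tells the hardware
 * where the instruction (descriptor) ring lives and how many entries it has,
 * then clears the doorbell.  Everything below that is not defined in this
 * file (octep_vf_write_csr64(), the CNXK_VF_SDP_R_IN_INSTR_* macros and the
 * oct->iq[] / iq->desc_ring_dma / iq->max_count fields) is an assumption,
 * so the sketch is kept compiled out.
 */
#if 0
static void octep_vf_setup_iq_regs_cnxk_sketch(struct octep_vf_device *oct, int iq_no)
{
	struct octep_vf_iq *iq = oct->iq[iq_no];

	/* Program descriptor ring base (DMA address) and ring size */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);

	/* Start with an empty doorbell count */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), 0ULL);
}
#endif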

/* Setup registers for a hardware Rx Queue */
static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
{
}
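
/*
 * Illustrative sketch only: the Rx side mirrors the Tx side, pointing the
 * hardware at the buffer-pointer (scatter list) ring and crediting it with
 * the initially filled buffers.  The CNXK_VF_SDP_R_OUT_SLIST_* macros,
 * octep_vf_write_csr64() and the oct->oq[] / oq->desc_ring_dma /
 * oq->max_count fields are assumptions, so the sketch is kept compiled out.
 */
#if 0
static void octep_vf_setup_oq_regs_cnxk_sketch(struct octep_vf_device *oct, int oq_no)
{
	struct octep_vf_oq *oq = oct->oq[oq_no];

	/* Program buffer pointer ring base (DMA address) and ring size */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);

	/* Credit the hardware with the freshly posted Rx buffers */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), oq->max_count);
}
#endif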

/* Setup registers for a VF mailbox */
static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no)
{
}

/* Tx/Rx queue interrupt handler */
static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data)
{
	return IRQ_HANDLED;
}
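
/*
 * Illustrative sketch only: a typical IOQ interrupt handler resolves the
 * per-vector cookie passed at request_irq() time and defers the real work
 * to NAPI.  napi_schedule() is the standard kernel API, but the
 * struct octep_vf_ioq_vector type and its napi member are assumptions
 * about the rest of this driver, so the sketch is kept compiled out.
 */
#if 0
static irqreturn_t octep_vf_ioq_intr_handler_cnxk_sketch(void *data)
{
	struct octep_vf_ioq_vector *vector = data;

	/* Tx completion and Rx processing happen in the NAPI poll handler */
	napi_schedule(&vector->napi);
	return IRQ_HANDLED;
}
#endif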

/* Re-initialize Octeon hardware registers */
static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct)
{
}

/* Enable all interrupts */
static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct)
{
}
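
/*
 * Illustrative sketch only: per-ring interrupt enables on this family are
 * usually bits in the R_IN/R_OUT interrupt-level registers.  The
 * CNXK_VF_SDP_R_*_INT_LEVELS macros, octep_vf_write_csr64() and the exact
 * enable bit position (bit 62 here) are assumptions, so the sketch is kept
 * compiled out.
 */
#if 0
static void octep_vf_enable_interrupts_cnxk_sketch(struct octep_vf_device *oct)
{
	u64 reg_val;
	int q;

	for (q = 0; q < oct->conf->ring_cfg.active_io_rings; q++) {
		/* Set the interrupt-enable bit without touching thresholds */
		reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
		octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q),
				     reg_val | BIT_ULL(62));

		reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
		octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q),
				     reg_val | BIT_ULL(62));
	}
}
#endif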

/* Disable all interrupts */
static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct)
{
}

/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq)
{
	return 0;
}
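
/*
 * Illustrative sketch only: the common pattern is to read the hardware's
 * cumulative "instructions processed" counter, take the delta since the
 * previous poll and advance the software read index modulo the ring size.
 * The iq->inst_cnt_reg, iq->pkt_in_done, iq->octep_vf_read_index and
 * iq->max_count field names are assumptions, so the sketch is kept
 * compiled out.
 */
#if 0
static u32 octep_vf_update_iq_read_index_cnxk_sketch(struct octep_vf_iq *iq)
{
	u32 pkt_in_done = readl(iq->inst_cnt_reg);
	u32 last_done;

	/* Completions that arrived since the last read of the counter */
	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	return (iq->octep_vf_read_index + last_done) % iq->max_count;
}
#endif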

/* Enable a hardware Tx Queue */
static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
{
}

/* Enable a hardware Rx Queue */
static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
{
}

/* Enable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct)
{
}
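
/*
 * Illustrative sketch only (kept compiled out so this stub file stays
 * warning free): enabling all queues is just the per-queue enables applied
 * to every ring the VF owns, using the active ring count discovered in
 * octep_vf_init_config_cnxk_vf().
 */
#if 0
static void octep_vf_enable_io_queues_cnxk_sketch(struct octep_vf_device *oct)
{
	int q;

	for (q = 0; q < oct->conf->ring_cfg.active_io_rings; q++) {
		octep_vf_enable_iq_cnxk(oct, q);
		octep_vf_enable_oq_cnxk(oct, q);
	}
}
#endif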

/* Disable a hardware Tx Queue assigned to VF */
static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
{
}

/* Disable a hardware Rx Queue assigned to VF */
static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
{
}

/* Disable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct)
{
}
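
/*
 * Illustrative sketch only (kept compiled out for the same reason as the
 * enable sketch above): teardown mirrors the enable path, walking every
 * active ring and disabling its Tx and Rx halves.
 */
#if 0
static void octep_vf_disable_io_queues_cnxk_sketch(struct octep_vf_device *oct)
{
	int q;

	for (q = 0; q < oct->conf->ring_cfg.active_io_rings; q++) {
		octep_vf_disable_iq_cnxk(oct, q);
		octep_vf_disable_oq_cnxk(oct, q);
	}
}
#endif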

/* Dump hardware registers (including Tx/Rx queues) for debugging. */
static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct)
{
}

/**
 * octep_vf_device_setup_cnxk() - Setup Octeon device.
 *
 * @oct: Octeon device private data structure.
 *
 * - initialize hardware operations.
 * - get target side pcie port number for the device.
 * - set initial configuration and max limits.
 */
void octep_vf_device_setup_cnxk(struct octep_vf_device *oct)
{
	oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk;
	oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk;
	oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk;

	oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk;
	oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk;

	oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk;
	oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk;

	oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk;

	oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk;
	oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk;
	oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk;

	oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk;
	oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk;
	oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk;
	oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk;

	oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk;
	octep_vf_init_config_cnxk_vf(oct);
}
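
/*
 * Usage sketch only: a chip-dispatch helper in the main driver would call
 * octep_vf_device_setup_cnxk() once it has identified a CNXK-family VF from
 * the PCI device ID.  The octep_vf_device_setup() wrapper, the chip_id
 * field and the OCTEP_VF_PCI_DEVICE_ID_CNXK macro are assumptions about the
 * surrounding driver, so the sketch is kept compiled out.
 */
#if 0
int octep_vf_device_setup(struct octep_vf_device *oct)
{
	switch (oct->chip_id) {
	case OCTEP_VF_PCI_DEVICE_ID_CNXK:
		octep_vf_device_setup_cnxk(oct);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif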