author    Dmitry Bezrukov <dbezrukov@marvell.com>  2020-05-22 11:19:40 +0300
committer David S. Miller <davem@davemloft.net>    2020-05-22 14:08:28 -0700
commit    a83fe6b6ad6b10f6912025ae23bd5c2596a4e7f4 (patch)
tree      f6a62568c6f6ed691faea395159b5417b3d6ee9a /drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
parent    0aa7bc3ee4652e0790f9b42c93c769b59b9f2308 (diff)
net: atlantic: QoS implementation: multi-TC support
This patch adds multi-TC support. PTP is automatically disabled when the
user enables more than 2 TCs, otherwise traffic on TC2 won't quite work,
because it's reserved for PTP.

Signed-off-by: Dmitry Bezrukov <dbezrukov@marvell.com>
Co-developed-by: Dmitry Bogdanov <dbogdanov@marvell.com>
Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
Co-developed-by: Mark Starovoytov <mstarovoitov@marvell.com>
Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
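As a hedged illustration of the policy described in the message above (not code from this patch), a driver could gate PTP on the requested TC count like the standalone sketch below; every example_* name is a hypothetical stand-in for the driver's real PTP/TC plumbing.

/* Hedged sketch, not from this patch: PTP is auto-disabled once the
 * user asks for more than 2 TCs, since TC2 is reserved for PTP.
 * All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PTP_TC 2U /* TC index reserved for PTP traffic */

struct example_cfg {
	bool is_ptp;        /* PTP currently enabled */
	unsigned int tcs;   /* number of traffic classes in use */
};

static void example_ptp_disable(struct example_cfg *cfg)
{
	cfg->is_ptp = false;
	printf("PTP disabled: TC%u is needed for user traffic\n", EXAMPLE_PTP_TC);
}

static void example_set_tcs(struct example_cfg *cfg, unsigned int num_tc)
{
	/* more than 2 TCs means TC2 is no longer exclusive to PTP */
	if (num_tc > EXAMPLE_PTP_TC && cfg->is_ptp)
		example_ptp_disable(cfg);
	cfg->tcs = num_tc;
}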
Diffstat (limited to 'drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c')
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c  |  74
1 file changed, 39 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 7caf586ea56c..775382440b47 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -46,7 +46,8 @@
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_GSO_UDP_L4 | \
- NETIF_F_GSO_PARTIAL, \
+ NETIF_F_GSO_PARTIAL | \
+ NETIF_F_HW_TC, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
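The NETIF_F_HW_TC bit added above is what lets the stack offer tc offloads such as mqprio to the driver through ndo_setup_tc. The sketch below shows that generic pattern with stock kernel APIs; it is a hedged example, not the atlantic driver's actual handler, and the example_* names are hypothetical.

/* Hedged, generic sketch of advertising NETIF_F_HW_TC and receiving
 * mqprio offload requests; example_* names are hypothetical.
 */
#include <linux/netdevice.h>
#include <net/pkt_cls.h>

static int example_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			    void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	netdev_info(ndev, "mqprio: %u TCs requested\n", mqprio->qopt.num_tc);
	/* program per-TC queue counts and the prio->TC map here */
	return 0;
}

static void example_advertise_tc(struct net_device *ndev)
{
	ndev->features |= NETIF_F_HW_TC;    /* enabled by default */
	ndev->hw_features |= NETIF_F_HW_TC; /* user-toggleable via ethtool */
}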
@@ -134,7 +135,7 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX;
u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX;
- unsigned int i_priority = 0U;
+ unsigned int prio = 0U;
u32 tc = 0U;
if (cfg->is_ptp) {
@@ -153,42 +154,45 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- /* TX Packet Scheduler Data TC0 */
- hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
- hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
- hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
- hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
-
- /* Tx buf size TC0 */
- hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
- hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
- (tx_buff_size *
- (1024 / 32U) * 66U) /
- 100U, tc);
- hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
- (tx_buff_size *
- (1024 / 32U) * 50U) /
- 100U, tc);
-
- /* QoS Rx buf size per TC */
- hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
- hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
- (rx_buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
- (rx_buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
-
- hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+ tx_buff_size /= cfg->tcs;
+ rx_buff_size /= cfg->tcs;
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ u32 threshold = 0U;
+
+ /* TX Packet Scheduler Data, per TC */
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
+
+ /* Tx buf size, per TC */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ /* QoS Rx buf size per TC */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+ }
if (cfg->is_ptp)
hw_atl_b0_tc_ptp_set(self);
/* QoS 802.1p priority -> TC mapping */
- for (i_priority = 8U; i_priority--;)
- hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ for (prio = 0; prio < 8; ++prio)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
+ cfg->prio_tc_map[prio]);
return aq_hw_err_from_flags(self);
}
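The rewritten hunk above splits the packet buffer evenly across the configured TCs and keeps the 66%/50% flow-control watermarks per TC. The standalone sketch below reproduces that arithmetic; it assumes buffer sizes are given in kilobytes and that the hardware takes thresholds in 32-byte units (which is what the (1024 / 32U) factor suggests), and the 160 KB figure for HW_ATL_B0_TXBUF_MAX is an assumption rather than something shown in this hunk.

/* Hedged sketch of the per-TC threshold arithmetic from the loop above.
 * The 160 KB total and the unit interpretation are assumptions.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tx_buff_size = 160U; /* assumed HW_ATL_B0_TXBUF_MAX, in KB */
	unsigned int tcs = 4U;            /* e.g. four traffic classes */

	unsigned int per_tc = tx_buff_size / tcs;                /* 40 KB per TC */
	unsigned int hi = (per_tc * (1024U / 32U) * 66U) / 100U; /* 66% -> 844 */
	unsigned int lo = (per_tc * (1024U / 32U) * 50U) / 100U; /* 50% -> 640 */

	printf("per-TC buf %u KB, hi %u, lo %u (32-byte units)\n",
	       per_tc, hi, lo);
	return 0;
}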
@@ -319,7 +323,7 @@ int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
/* Tx TC/Queue number config */
- hw_atl_tpb_tps_tx_tc_mode_set(self, 1U);
+ hw_atl_tpb_tps_tx_tc_mode_set(self, self->aq_nic_cfg->tc_mode);
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
@@ -345,7 +349,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
- hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
/* Rx flow control */
hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
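The last two hunks stop hardcoding the TC/queue mode (1U) and take it from the nic config, so the split between traffic classes and queues can follow the number of TCs the user enabled. As a hedged sketch of how such a mode might be derived from the TC count (the enum names and values below are hypothetical, not taken from this series):

/* Hedged sketch: hypothetical mode selection by TC count. The real
 * driver's enum names and values may differ.
 */
enum example_tc_mode {
	EXAMPLE_TC_MODE_8TCS = 0, /* 8 TCs, fewer queues per TC */
	EXAMPLE_TC_MODE_4TCS = 1, /* 4 TCs, more queues per TC */
};

static enum example_tc_mode example_pick_tc_mode(unsigned int tcs)
{
	/* the 4-TC split keeps more queues per TC when few TCs are needed */
	return tcs <= 4U ? EXAMPLE_TC_MODE_4TCS : EXAMPLE_TC_MODE_8TCS;
}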