From a83fe6b6ad6b10f6912025ae23bd5c2596a4e7f4 Mon Sep 17 00:00:00 2001
From: Dmitry Bezrukov
Date: Fri, 22 May 2020 11:19:40 +0300
Subject: net: atlantic: QoS implementation: multi-TC support

This patch adds multi-TC support.

PTP is automatically disabled when the user enables more than 2 TCs,
otherwise traffic on TC2 won't quite work, because it's reserved for
PTP.

Signed-off-by: Dmitry Bezrukov
Co-developed-by: Dmitry Bogdanov
Signed-off-by: Dmitry Bogdanov
Co-developed-by: Mark Starovoytov
Signed-off-by: Mark Starovoytov
Signed-off-by: Igor Russkikh
Signed-off-by: David S. Miller
---
 .../ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 74 ++++++++++++----------
 1 file changed, 39 insertions(+), 35 deletions(-)

(limited to 'drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c')

diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 7caf586ea56c..775382440b47 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -46,7 +46,8 @@
                 NETIF_F_HW_VLAN_CTAG_RX |     \
                 NETIF_F_HW_VLAN_CTAG_TX |     \
                 NETIF_F_GSO_UDP_L4 |          \
-                NETIF_F_GSO_PARTIAL,          \
+                NETIF_F_GSO_PARTIAL |         \
+                NETIF_F_HW_TC,                \
         .hw_priv_flags = IFF_UNICAST_FLT, \
         .flow_control = true,             \
         .mtu = HW_ATL_B0_MTU_JUMBO,       \
@@ -134,7 +135,7 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
         struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
         u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX;
         u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX;
-        unsigned int i_priority = 0U;
+        unsigned int prio = 0U;
         u32 tc = 0U;
 
         if (cfg->is_ptp) {
@@ -153,42 +154,45 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
         hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
         hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
 
-        /* TX Packet Scheduler Data TC0 */
-        hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
-        hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
-        hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
-        hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
-
-        /* Tx buf size TC0 */
-        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
-        hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
-                                                   (tx_buff_size *
-                                                   (1024 / 32U) * 66U) /
-                                                   100U, tc);
-        hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
-                                                   (tx_buff_size *
-                                                   (1024 / 32U) * 50U) /
-                                                   100U, tc);
-
-        /* QoS Rx buf size per TC */
-        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
-        hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
-                                                   (rx_buff_size *
-                                                   (1024U / 32U) * 66U) /
-                                                   100U, tc);
-        hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
-                                                   (rx_buff_size *
-                                                   (1024U / 32U) * 50U) /
-                                                   100U, tc);
-
-        hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+        tx_buff_size /= cfg->tcs;
+        rx_buff_size /= cfg->tcs;
+        for (tc = 0; tc < cfg->tcs; tc++) {
+                u32 threshold = 0U;
+
+                /* TX Packet Scheduler Data TC0 */
+                hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
+                hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
+                hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
+                hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
+
+                /* Tx buf size TC0 */
+                hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
+
+                threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
+                hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+                threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
+                hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+                /* QoS Rx buf size per TC */
+                hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
+
+                threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
+                hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+                threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
+                hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+                hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+        }
 
         if (cfg->is_ptp)
                 hw_atl_b0_tc_ptp_set(self);
 
         /* QoS 802.1p priority -> TC mapping */
-        for (i_priority = 8U; i_priority--;)
-                hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+        for (prio = 0; prio < 8; ++prio)
+                hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
+                                                        cfg->prio_tc_map[prio]);
 
         return aq_hw_err_from_flags(self);
 }
@@ -319,7 +323,7 @@ int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
 static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
 {
         /* Tx TC/Queue number config */
-        hw_atl_tpb_tps_tx_tc_mode_set(self, 1U);
+        hw_atl_tpb_tps_tx_tc_mode_set(self, self->aq_nic_cfg->tc_mode);
 
         hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
         hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
@@ -345,7 +349,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
         int i;
 
         /* Rx TC/RSS number config */
-        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
+        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
 
         /* Rx flow control */
         hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
-- 
cgit
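
For reference, a minimal standalone sketch of the buffer arithmetic this patch
introduces in hw_atl_b0_hw_qos_set(): the total TX/RX packet buffer is split
evenly across the enabled TCs, and the high/low flow-control watermarks are
66% and 50% of each per-TC slice. The buffer totals below are hypothetical
placeholders (the real HW_ATL_B0_TXBUF_MAX / HW_ATL_B0_RXBUF_MAX values are
not shown in this diff), the priority map is an illustrative example of
cfg->prio_tc_map, and the (1024 / 32) factor is assumed to convert a size in
KB into the 32-byte units of the threshold registers. This is not driver
code, just the math.

/*
 * Standalone sketch of the per-TC buffer split and watermark math.
 * All constants here are illustrative, not taken from the driver.
 */
#include <stdio.h>

#define TXBUF_TOTAL_KB 160U /* placeholder for HW_ATL_B0_TXBUF_MAX */
#define RXBUF_TOTAL_KB 320U /* placeholder for HW_ATL_B0_RXBUF_MAX */

int main(void)
{
        unsigned int tcs = 4U; /* number of TCs the user enabled */
        /* Hypothetical 802.1p priority -> TC map (cfg->prio_tc_map in the
         * driver): with 4 TCs, pairs of priorities share a TC.
         */
        unsigned int prio_tc_map[8] = { 0U, 0U, 1U, 1U, 2U, 2U, 3U, 3U };
        unsigned int tx_buff_size = TXBUF_TOTAL_KB / tcs; /* even split */
        unsigned int rx_buff_size = RXBUF_TOTAL_KB / tcs;
        unsigned int tc, prio;

        for (tc = 0U; tc < tcs; tc++) {
                /* Same formulas as the patch: high watermark at 66% and low
                 * watermark at 50% of the per-TC slice, in 32-byte units.
                 */
                unsigned int tx_hi = (tx_buff_size * (1024U / 32U) * 66U) / 100U;
                unsigned int tx_lo = (tx_buff_size * (1024U / 32U) * 50U) / 100U;
                unsigned int rx_hi = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
                unsigned int rx_lo = (rx_buff_size * (1024U / 32U) * 50U) / 100U;

                printf("TC%u: tx %u KB (hi=%u lo=%u), rx %u KB (hi=%u lo=%u)\n",
                       tc, tx_buff_size, tx_hi, tx_lo,
                       rx_buff_size, rx_hi, rx_lo);
        }

        for (prio = 0U; prio < 8U; prio++)
                printf("802.1p priority %u -> TC %u\n", prio, prio_tc_map[prio]);

        return 0;
}

With the placeholder totals above and 4 TCs, each TX slice is 40 KB with
watermarks 844/640 and each RX slice is 80 KB with watermarks 1689/1280 (in
32-byte units). Computing the watermarks from the per-TC slice rather than
the whole buffer is what keeps flow control correct as the TC count grows.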