Diffstat (limited to 'drivers/net/ethernet/realtek/rtase')
-rw-r--r--  drivers/net/ethernet/realtek/rtase/rtase.h       |  16
-rw-r--r--  drivers/net/ethernet/realtek/rtase/rtase_main.c  | 105
2 files changed, 110 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index 2bbfcad613ab..20decdeb9fdb 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -170,6 +170,7 @@ enum rtase_registers {
#define RTASE_TC_MODE_MASK GENMASK(11, 10)
RTASE_TOKSEL = 0x2046,
+ RTASE_TXQCRDT_0 = 0x2500,
RTASE_RFIFONFULL = 0x4406,
RTASE_INT_MITI_TX = 0x0A00,
RTASE_INT_MITI_RX = 0x0A80,
@@ -259,6 +260,12 @@ union rtase_rx_desc {
#define RTASE_VLAN_TAG_MASK GENMASK(15, 0)
#define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0)
+/* txqos hardware definitions */
+#define RTASE_1T_CLOCK 64
+#define RTASE_1T_POWER 10000000
+#define RTASE_IDLESLOPE_INT_SHIFT 25
+#define RTASE_IDLESLOPE_INT_MASK GENMASK(31, 25)
+
#define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10)
struct rtase_int_vector {
@@ -281,6 +288,7 @@ struct rtase_ring {
u32 cur_idx;
u32 dirty_idx;
u16 index;
+ u8 type;
struct sk_buff *skbuff[RTASE_NUM_DESC];
void *data_buf[RTASE_NUM_DESC];
@@ -294,6 +302,13 @@ struct rtase_ring {
u64 alloc_fail;
};
+struct rtase_txqos {
+ int hicredit;
+ int locredit;
+ int idleslope;
+ int sendslope;
+};
+
struct rtase_stats {
u64 tx_dropped;
u64 rx_dropped;
@@ -313,6 +328,7 @@ struct rtase_private {
struct page_pool *page_pool;
struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE];
+ struct rtase_txqos tx_qos[RTASE_NUM_TX_QUEUE];
struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE];
struct rtase_counters *tally_vaddr;
dma_addr_t tally_paddr;
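
Taken together, the two files add Credit Based Shaper (CBS, the tc "cbs" qdisc) offload and per-queue NAPI registration. The rtase.h hunks above provide the pieces the offload needs: RTASE_TXQCRDT_0 is the base of the per-queue credit registers (one 32-bit word per TX queue at a 4-byte stride, as written by rtase_set_hw_cbs() in rtase_main.c below), RTASE_IDLESLOPE_INT_MASK places the integer part of the idleslope credit in bits 31:25 with bits 24:0 left for the fraction, and struct rtase_txqos caches the CBS parameters per queue. A minimal sketch of the field packing, assuming only the layout implied by these masks (rtase_credit_word() is a hypothetical helper, not part of the patch):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define RTASE_IDLESLOPE_INT_MASK	GENMASK(31, 25)

/* Pack a 7-bit integer part and a 25-bit fraction into the credit word
 * that rtase_set_hw_cbs() writes to RTASE_TXQCRDT_0 + queue * 4. */
static u32 rtase_credit_word(u32 int_part, u32 frac_part)
{
	return u32_encode_bits(int_part, RTASE_IDLESLOPE_INT_MASK) |
	       (frac_part & GENMASK(24, 0));
}
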
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 55b8d3666153..ef13109c49cf 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -326,6 +326,7 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
ring->cur_idx = 0;
ring->dirty_idx = 0;
ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_TX;
ring->alloc_fail = 0;
for (i = 0; i < RTASE_NUM_DESC; i++) {
@@ -345,6 +346,9 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
ring->ivec = &tp->int_vector[0];
list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
}
+
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
}
static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
@@ -590,6 +594,7 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
ring->cur_idx = 0;
ring->dirty_idx = 0;
ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_RX;
ring->alloc_fail = 0;
for (i = 0; i < RTASE_NUM_DESC; i++)
@@ -597,6 +602,8 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
ring->ring_handler = rx_handler;
ring->ivec = &tp->int_vector[idx];
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
}
@@ -1114,7 +1121,7 @@ static int rtase_open(struct net_device *dev)
/* request other interrupts to handle multiqueue */
for (i = 1; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
- snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
+ snprintf(ivec->name, sizeof(ivec->name), "%s_int%u",
tp->dev->name, i);
ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
ivec->name, ivec);
@@ -1161,8 +1168,12 @@ static void rtase_down(struct net_device *dev)
ivec = &tp->int_vector[i];
napi_disable(&ivec->napi);
list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
- ring_entry)
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
list_del(&ring->ring_entry);
+ }
}
netif_tx_disable(dev);
@@ -1518,8 +1529,12 @@ static void rtase_sw_reset(struct net_device *dev)
for (i = 0; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
- ring_entry)
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
list_del(&ring->ring_entry);
+ }
}
ret = rtase_init_ring(dev);
@@ -1661,6 +1676,65 @@ static void rtase_get_stats64(struct net_device *dev,
stats->rx_length_errors = tp->stats.rx_length_errors;
}
+static void rtase_set_hw_cbs(const struct rtase_private *tp, u32 queue)
+{
+ u32 idle = tp->tx_qos[queue].idleslope * RTASE_1T_CLOCK;
+ u32 val, i;
+
+ val = u32_encode_bits(idle / RTASE_1T_POWER, RTASE_IDLESLOPE_INT_MASK);
+ idle %= RTASE_1T_POWER;
+
+ for (i = 1; i <= RTASE_IDLESLOPE_INT_SHIFT; i++) {
+ idle *= 2;
+ if ((idle / RTASE_1T_POWER) == 1)
+ val |= BIT(RTASE_IDLESLOPE_INT_SHIFT - i);
+
+ idle %= RTASE_1T_POWER;
+ }
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, val);
+}
+
+static int rtase_setup_tc_cbs(struct rtase_private *tp,
+ const struct tc_cbs_qopt_offload *qopt)
+{
+ int queue = qopt->queue;
+
+ if (queue < 0 || queue >= tp->func_tx_queue_num)
+ return -EINVAL;
+
+ if (!qopt->enable) {
+ tp->tx_qos[queue].hicredit = 0;
+ tp->tx_qos[queue].locredit = 0;
+ tp->tx_qos[queue].idleslope = 0;
+ tp->tx_qos[queue].sendslope = 0;
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, 0);
+ } else {
+ tp->tx_qos[queue].hicredit = qopt->hicredit;
+ tp->tx_qos[queue].locredit = qopt->locredit;
+ tp->tx_qos[queue].idleslope = qopt->idleslope;
+ tp->tx_qos[queue].sendslope = qopt->sendslope;
+
+ rtase_set_hw_cbs(tp, queue);
+ }
+
+ return 0;
+}
+
+static int rtase_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return rtase_setup_tc_cbs(tp, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static netdev_features_t rtase_fix_features(struct net_device *dev,
netdev_features_t features)
{
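
rtase_set_hw_cbs() above converts the idleslope handed over by the cbs qdisc offload (kbit/s, per tc-cbs(8)) into that fixed-point credit word: the integer part of idleslope * RTASE_1T_CLOCK / RTASE_1T_POWER lands in bits 31:25, and the remainder is turned into a 25-bit binary fraction by repeated doubling, one bit per loop iteration (long division in base 2). A standalone sketch of the same arithmetic, using a hypothetical idleslope of 500000 kbit/s purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define RTASE_1T_CLOCK			64
#define RTASE_1T_POWER			10000000
#define RTASE_IDLESLOPE_INT_SHIFT	25

/* Same conversion as rtase_set_hw_cbs(), in plain C so the encoding can
 * be checked from the command line. */
static uint32_t cbs_credit_word(uint32_t idleslope_kbps)
{
	uint32_t idle = idleslope_kbps * RTASE_1T_CLOCK;
	uint32_t val, i;

	val = (idle / RTASE_1T_POWER) << RTASE_IDLESLOPE_INT_SHIFT;
	idle %= RTASE_1T_POWER;

	for (i = 1; i <= RTASE_IDLESLOPE_INT_SHIFT; i++) {
		idle *= 2;
		if (idle / RTASE_1T_POWER == 1)
			val |= 1u << (RTASE_IDLESLOPE_INT_SHIFT - i);
		idle %= RTASE_1T_POWER;
	}

	return val;
}

int main(void)
{
	/* 500000 kbit/s: 500000 * 64 / 10000000 = 3.2, so this prints
	 * 0x06666666 (3.2 in Q7.25 fixed point). */
	printf("0x%08x\n", cbs_credit_word(500000));
	return 0;
}

rtase_setup_tc_cbs() only caches the qopt parameters and programs (or clears) the credit register; the path is reached from userspace through ndo_setup_tc with TC_SETUP_QDISC_CBS, i.e. a "tc qdisc replace ... cbs ... offload 1" on one of the device's TX queues.
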
@@ -1696,6 +1770,7 @@ static const struct net_device_ops rtase_netdev_ops = {
.ndo_change_mtu = rtase_change_mtu,
.ndo_tx_timeout = rtase_tx_timeout,
.ndo_get_stats64 = rtase_get_stats64,
+ .ndo_setup_tc = rtase_setup_tc,
.ndo_fix_features = rtase_fix_features,
.ndo_set_features = rtase_set_features,
};
@@ -1811,6 +1886,18 @@ static void rtase_init_netdev_ops(struct net_device *dev)
dev->ethtool_ops = &rtase_ethtool_ops;
}
+static void rtase_init_napi(struct rtase_private *tp)
+{
+ u16 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ netif_napi_add_config(tp->dev, &tp->int_vector[i].napi,
+ tp->int_vector[i].poll, i);
+ netif_napi_set_irq(&tp->int_vector[i].napi,
+ tp->int_vector[i].irq);
+ }
+}
+
static void rtase_reset_interrupt(struct pci_dev *pdev,
const struct rtase_private *tp)
{
@@ -1896,9 +1983,6 @@ static void rtase_init_int_vector(struct rtase_private *tp)
memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name));
INIT_LIST_HEAD(&tp->int_vector[0].ring_list);
- netif_napi_add(tp->dev, &tp->int_vector[0].napi,
- tp->int_vector[0].poll);
-
/* interrupt vector 1 ~ 3 */
for (i = 1; i < tp->int_nums; i++) {
tp->int_vector[i].tp = tp;
@@ -1912,9 +1996,6 @@ static void rtase_init_int_vector(struct rtase_private *tp)
memset(tp->int_vector[i].name, 0x0,
sizeof(tp->int_vector[0].name));
INIT_LIST_HEAD(&tp->int_vector[i].ring_list);
-
- netif_napi_add(tp->dev, &tp->int_vector[i].napi,
- tp->int_vector[i].poll);
}
}
@@ -1923,7 +2004,7 @@ static u16 rtase_calc_time_mitigation(u32 time_us)
u8 msb, time_count, time_unit;
u16 int_miti;
- time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
+ time_us = min(time_us, RTASE_MITI_MAX_TIME);
if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
msb = fls(time_us);
@@ -1945,7 +2026,7 @@ static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
u8 msb, pkt_num_count, pkt_num_unit;
u16 int_miti;
- pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM);
+ pkt_num = min(pkt_num, RTASE_MITI_MAX_PKT_NUM);
if (pkt_num > 60) {
pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
@@ -2146,6 +2227,8 @@ static int rtase_init_one(struct pci_dev *pdev,
goto err_out_del_napi;
}
+ rtase_init_napi(tp);
+
rtase_init_netdev_ops(dev);
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;