Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_ptp.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c  48
1 file changed, 41 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 726579c0098c..30061598912b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -658,6 +658,9 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
struct ice_ptp_port *ptp_port;
bool ts_handled = true;
struct ice_pf *pf;
+ struct ice_hw *hw;
+ u64 tstamp_ready;
+ int err;
u8 idx;
if (!tx->init)
@@ -665,6 +668,12 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ hw = &pf->hw;
+
+ /* Read the Tx ready status first */
+ err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
+ if (err)
+ return false;
for_each_set_bit(idx, tx->in_use, tx->len) {
struct skb_shared_hwtstamps shhwtstamps = {};
@@ -672,7 +681,6 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
u8 phy_idx = idx + tx->offset;
u64 raw_tstamp = 0, tstamp;
bool drop_ts = false;
struct sk_buff *skb;
- int err;
/* Drop packets which have waited for more than 2 seconds */
if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
@@ -680,27 +688,47 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
drop_ts = true;

/* Count the number of Tx timestamps that timed out */
pf->ptp.tx_hwtstamp_timeouts++;
+ }
- goto skip_ts_read;
+ /* Only read a timestamp from the PHY if it's marked as ready
+ * by the tstamp_ready register. This avoids unnecessary
+ * reading of timestamps which are not yet valid. This is
+ * important as we must read all valid timestamps, and only
+ * valid timestamps, during each interrupt. If we do not, the
+ * hardware logic for generating a new interrupt can get stuck
+ * on some devices.
+ */
+ if (!(tstamp_ready & BIT_ULL(phy_idx))) {
+ if (drop_ts)
+ goto skip_ts_read;
+
+ continue;
}
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
- err = ice_read_phy_tstamp(&pf->hw, tx->block, phy_idx,
- &raw_tstamp);
+ err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
if (err)
continue;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
- /* Check if the timestamp is invalid or stale */
- if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+ /* For PHYs which don't implement a proper timestamp ready
+ * bitmap, verify that the timestamp value is different
+ * from the last cached timestamp. If it is not, skip this for
+ * now assuming it hasn't yet been captured by hardware.
+ */
+ if (!drop_ts && tx->verify_cached &&
raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
+ /* Discard any timestamp value without the valid bit set */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID))
+ drop_ts = true;
+
skip_ts_read:
spin_lock(&tx->lock);
- if (raw_tstamp)
+ if (tx->verify_cached && raw_tstamp)
tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
@@ -836,6 +864,7 @@ ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
tx->block = port / ICE_PORTS_PER_QUAD;
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822;
tx->len = INDEX_PER_PORT_E822;
+ tx->verify_cached = 0;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -854,6 +883,11 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
tx->block = pf->hw.port_info->lport;
tx->offset = 0;
tx->len = INDEX_PER_PORT_E810;
+ /* The E810 PHY does not provide a timestamp ready bitmap. Instead,
+ * verify new timestamps against a cached copy of the last read
+ * timestamp.
+ */
+ tx->verify_cached = 1;
return ice_ptp_alloc_tx_tracker(tx);
}
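
The pattern introduced above (read the Tx timestamp ready bitmap once per interrupt, service only the slots it marks as ready, and fall back to comparing against a cached value on PHYs without such a bitmap) can be illustrated with a small standalone sketch. The helpers read_ready_bitmap() and read_tstamp(), the tx_tracker structure, and the TS_VALID bit below are hypothetical stand-ins invented for the illustration; they are not the ice driver's functions or registers.

/* Minimal, self-contained sketch of the ready-bitmap polling pattern,
 * built around hypothetical helpers; not the ice driver's own code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TS_VALID  0x1ULL   /* assumed valid bit, analogous to ICE_PTP_TS_VALID */
#define NUM_SLOTS 64

struct tx_tracker {
	bool     in_use[NUM_SLOTS];
	uint64_t cached[NUM_SLOTS];
	bool     verify_cached;  /* true when the PHY has no ready bitmap */
};

/* Hypothetical hardware accessors, stubbed for the sketch */
static uint64_t read_ready_bitmap(void) { return 0x1ULL; }  /* only slot 0 ready */
static uint64_t read_tstamp(int slot)
{
	return ((uint64_t)slot << 1) | TS_VALID;
}

/* Returns true when every outstanding request was handled in this pass */
static bool process_tx_tstamps(struct tx_tracker *tx)
{
	/* Sketch simplification: with no ready bitmap, treat all slots as
	 * ready and rely on the cached-value comparison below instead.
	 */
	uint64_t ready = tx->verify_cached ? ~0ULL : read_ready_bitmap();
	bool all_handled = true;

	for (int idx = 0; idx < NUM_SLOTS; idx++) {
		if (!tx->in_use[idx])
			continue;

		/* Only touch slots the hardware has marked as ready */
		if (!(ready & (1ULL << idx))) {
			all_handled = false;
			continue;
		}

		uint64_t raw = read_tstamp(idx);

		/* Ready-bitmap-less PHYs: an unchanged value means no new
		 * capture has landed yet, so leave the slot pending.
		 */
		if (tx->verify_cached && raw == tx->cached[idx]) {
			all_handled = false;
			continue;
		}

		tx->cached[idx] = raw;
		tx->in_use[idx] = false;

		if (raw & TS_VALID)
			printf("slot %d: raw timestamp %#llx\n",
			       idx, (unsigned long long)raw);
		/* else: slot released without a report, like the drop_ts path */
	}

	return all_handled;
}

int main(void)
{
	struct tx_tracker tx = { .in_use = { [0] = true, [5] = true } };

	/* First pass handles slot 0 only; slot 5 stays pending until its
	 * ready bit appears, mirroring how the interrupt is left armed.
	 */
	printf("all handled: %d\n", process_tx_tstamps(&tx));
	return 0;
}

The property the sketch preserves is that one pass touches every slot whose ready bit is set and no slot whose bit is clear; per the comment in the change above, doing otherwise can leave the hardware's interrupt generation logic stuck on some devices, and the return value tells the caller whether another pass is still needed.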