Diffstat (limited to 'drivers/net/ipa/ipa_endpoint.c')
-rw-r--r--	drivers/net/ipa/ipa_endpoint.c	307
1 file changed, 158 insertions(+), 149 deletions(-)
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index ce7f2d6e447e..2ee80ed140b7 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -241,7 +241,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (!data->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 buffer_size;
u32 aggr_size;
u32 limit;
@@ -304,7 +304,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
rx_config->aggr_hard_limit);
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
- limit = ipa_reg_field_max(reg, BYTE_LIMIT);
+ limit = reg_field_max(reg, BYTE_LIMIT);
if (aggr_size > limit) {
dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
data->endpoint_id, aggr_size, limit);
@@ -447,7 +447,7 @@ static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 field_id;
u32 offset;
bool state;
@@ -460,11 +460,11 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
WARN_ON(ipa->version >= IPA_VERSION_4_0);
reg = ipa_reg(ipa, ENDP_INIT_CTRL);
- offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
- mask = ipa_reg_bit(reg, field_id);
+ mask = reg_bit(reg, field_id);
state = !!(val & mask);
@@ -493,13 +493,13 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
- val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
return !!(val & BIT(endpoint_id % 32));
}
@@ -510,12 +510,12 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
u32 mask = BIT(endpoint_id % 32);
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
- iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
}
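For illustration only (the endpoint ID is assumed, not taken from this patch), the unit/mask arithmetic used by these per-endpoint bit-array registers works out as follows:
	/* Endpoint ID 40 lands in 32-bit unit 40 / 32 = 1 of the
	 * register array, at bit position 40 % 32 = 8 within it.
	 */
	u32 unit = 40 / 32;		/* second register unit */
	u32 mask = BIT(40 % 32);	/* BIT(8) */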
/**
@@ -613,7 +613,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
/* We only reset modem TX endpoints */
@@ -622,7 +622,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
continue;
reg = ipa_reg(ipa, ENDP_STATUS);
- offset = ipa_reg_n_offset(reg, endpoint_id);
+ offset = reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
@@ -645,7 +645,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_CFG);
@@ -658,7 +658,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
/* Checksum header offset is in 4-byte units */
off = sizeof(struct rmnet_map_header) / sizeof(u32);
- val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
+ val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
@@ -671,26 +671,26 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
- val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
+ val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
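As an illustrative aside (derived from the struct layout, not stated in this patch), the checksum header offset encoded above is small:
	/* struct rmnet_map_header is 4 bytes (flags, mux_id, pkt_len),
	 * so off = sizeof(struct rmnet_map_header) / sizeof(u32) = 1:
	 * the checksum metadata header starts one 32-bit word in.
	 */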
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
reg = ipa_reg(ipa, ENDP_INIT_NAT);
- val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
+ val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static u32
@@ -716,13 +716,13 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
static u32 ipa_header_size_encode(enum ipa_version version,
- const struct ipa_reg *reg, u32 header_size)
+ const struct reg *reg, u32 header_size)
{
- u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
+ u32 field_max = reg_field_max(reg, HDR_LEN);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
- val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
+ val = reg_encode(reg, HDR_LEN, header_size & field_max);
if (version < IPA_VERSION_4_5) {
WARN_ON(header_size > field_max);
return val;
@@ -730,21 +730,21 @@ static u32 ipa_header_size_encode(enum ipa_version version,
/* IPA v4.5 adds a few more most-significant bits */
header_size >>= hweight32(field_max);
- WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
- val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
+ WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
+ val |= reg_encode(reg, HDR_LEN_MSB, header_size);
return val;
}
/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
- const struct ipa_reg *reg, u32 offset)
+ const struct reg *reg, u32 offset)
{
- u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
+ u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
- val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
+ val = reg_encode(reg, HDR_OFST_METADATA, offset);
if (version < IPA_VERSION_4_5) {
WARN_ON(offset > field_max);
return val;
@@ -752,8 +752,8 @@ static u32 ipa_metadata_offset_encode(enum ipa_version version,
/* IPA v4.5 adds a few more most-significant bits */
offset >>= hweight32(field_max);
- WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
- val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
+ WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
+ val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
return val;
}
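A worked example of the HDR_LEN low/high split above (the 6-bit field width is assumed for illustration; actual widths vary by IPA version):
	/* With field_max = 0x3f and header_size = 0x58:
	 * HDR_LEN gets 0x58 & 0x3f = 0x18, and after
	 * header_size >>= hweight32(0x3f) the remaining 0x1 goes in
	 * HDR_LEN_MSB, so hardware reconstructs (0x1 << 6) | 0x18 = 0x58.
	 */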
@@ -783,7 +783,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR);
@@ -806,13 +806,13 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
off = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version >= IPA_VERSION_4_5)
- off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
- val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
- val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
+ val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
- val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
+ val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
@@ -820,7 +820,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
@@ -828,13 +828,13 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
u32 pad_align = endpoint->config.rx.pad_align;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
- val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
+ val |= reg_bit(reg, HDR_ENDIANNESS); /* big endian */
/* A QMAP header contains a 6 bit pad field at offset 0.
* The RMNet driver assumes this field is meaningful in
@@ -844,16 +844,16 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
* (although 0) should be ignored.
*/
if (!endpoint->toward_ipa) {
- val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
+ val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
+ val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
}
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
if (!endpoint->toward_ipa)
- val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
+ val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
@@ -861,25 +861,25 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->config.qmap && !endpoint->toward_ipa) {
- u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
u32 off; /* Field offset within header */
off = offsetof(struct rmnet_map_header, pkt_len);
/* Low bits are in the ENDP_INIT_HDR register */
off >>= hweight32(mask);
- val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
u32 offset;
@@ -887,7 +887,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
return; /* Register not valid for TX endpoints */
reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
- offset = ipa_reg_n_offset(reg, endpoint_id);
+ offset = reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->config.qmap)
@@ -899,7 +899,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -911,82 +911,90 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
- val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
- val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
+ val = reg_encode(reg, ENDP_MODE, IPA_DMA);
+ val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
} else {
- val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
+ val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
}
/* All other bits unspecified (and 0) */
- offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
iowrite32(val, ipa->reg_virt + offset);
}
-/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
- * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
- * they're configured to have granularity 100 usec and 1 msec, respectively.
- *
- * The return value is the positive or negative Qtime value to use to
- * express the (microsecond) time provided. A positive return value
- * means pulse generator 0 can be used; otherwise use pulse generator 1.
+/* For IPA v4.5+, times are expressed using Qtime. A time is represented
+ * at one of several available granularities, which are configured in
+ * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
+ * generators are set up with different "tick" periods. A Qtime value
+ * encodes a tick count along with an indication of a pulse generator
+ * (which has a fixed tick period). Two pulse generators are always
+ * available to the AP; a third is available starting with IPA v5.0.
+ * This function determines which pulse generator most accurately
+ * represents the time period provided, and returns the tick count to
+ * use to represent that time.
*/
-static int ipa_qtime_val(u32 microseconds, u32 max)
-{
- u32 val;
-
- /* Use 100 microsecond granularity if possible */
- val = DIV_ROUND_CLOSEST(microseconds, 100);
- if (val <= max)
- return (int)val;
-
- /* Have to use pulse generator 1 (millisecond granularity) */
- val = DIV_ROUND_CLOSEST(microseconds, 1000);
- WARN_ON(val > max);
+static u32
+ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
+{
+ u32 which = 0;
+ u32 ticks;
+
+ /* Pulse generator 0 has 100 microsecond granularity */
+ ticks = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (ticks <= max)
+ goto out;
+
+ /* Pulse generator 1 has millisecond granularity */
+ which = 1;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
+ if (ticks <= max)
+ goto out;
+
+ if (ipa->version >= IPA_VERSION_5_0) {
+ /* Pulse generator 2 has 10 millisecond granularity */
+ which = 2;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
+ }
+ WARN_ON(ticks > max);
+out:
+ *select = which;
- return (int)-val;
+ return ticks;
}
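For illustration, a hypothetical caller of the new interface (the numbers are assumed, not taken from this patch):
	u32 select;
	u32 ticks;

	/* A 200 ms period with a 255-tick field: pulse generator 0
	 * (100 us ticks) would need 2000 ticks, too many, so the
	 * function falls through to generator 1 (1 ms ticks) and
	 * returns ticks = 200 with *select set to 1.
	 */
	ticks = ipa_qtime_val(ipa, 200 * 1000, 255, &select);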
/* Encode the aggregation timer limit (microseconds) based on IPA version */
-static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
+ u32 ticks;
u32 max;
- u32 val;
if (!microseconds)
return 0; /* Nothing to compute if time limit is 0 */
- max = ipa_reg_field_max(reg, TIME_LIMIT);
+ max = reg_field_max(reg, TIME_LIMIT);
if (ipa->version >= IPA_VERSION_4_5) {
- u32 gran_sel;
- int ret;
-
- /* Compute the Qtime limit value to use */
- ret = ipa_qtime_val(microseconds, max);
- if (ret < 0) {
- val = -ret;
- gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
- } else {
- val = ret;
- gran_sel = 0;
- }
+ u32 select;
+
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
- return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
+ return reg_encode(reg, AGGR_GRAN_SEL, select) |
+ reg_encode(reg, TIME_LIMIT, ticks);
}
/* We program aggregation granularity in ipa_hardware_config() */
- val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
- WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
+ ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
+ WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
microseconds, max * IPA_AGGR_GRANULARITY);
- return ipa_reg_encode(reg, TIME_LIMIT, val);
+ return reg_encode(reg, TIME_LIMIT, ticks);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
@@ -997,13 +1005,13 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
u32 limit;
rx_config = &endpoint->config.rx;
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
- val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
buffer_size = rx_config->buffer_size;
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
- val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
+ val |= reg_encode(reg, BYTE_LIMIT, limit);
limit = rx_config->aggr_time_limit;
val |= aggr_time_limit_encode(ipa, reg, limit);
@@ -1011,20 +1019,20 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* AGGR_PKT_LIMIT is 0 (unlimited) */
if (rx_config->aggr_close_eof)
- val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
+ val |= reg_bit(reg, SW_EOF_ACTIVE);
} else {
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
- val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
+ val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
/* other fields ignored */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
@@ -1035,7 +1043,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
* Return the encoded value representing the timeout period provided
* that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
*/
-static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
u32 width;
@@ -1049,21 +1057,14 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
return 0; /* Nothing to compute if timer period is 0 */
if (ipa->version >= IPA_VERSION_4_5) {
- u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
- u32 gran_sel;
- int ret;
-
- /* Compute the Qtime limit value to use */
- ret = ipa_qtime_val(microseconds, max);
- if (ret < 0) {
- val = -ret;
- gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
- } else {
- val = ret;
- gran_sel = 0;
- }
+ u32 max = reg_field_max(reg, TIMER_LIMIT);
+ u32 select;
+ u32 ticks;
- return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
+
+ return reg_encode(reg, TIMER_GRAN_SEL, select) |
+ reg_encode(reg, TIMER_LIMIT, ticks);
}
/* Use 64 bit arithmetic to avoid overflow */
@@ -1071,11 +1072,11 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
/* We still need the result to fit into the field */
- WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
+ WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
/* IPA v3.5.1 through v4.1 just record the tick count */
if (ipa->version < IPA_VERSION_4_2)
- return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
+ return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
/* For IPA v4.2, the tick count is represented by base and
* scale fields within the 32-bit timer register, where:
@@ -1086,7 +1087,7 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
* such that high bit is included.
*/
high = fls(ticks); /* 1..32 (or warning above) */
- width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
+ width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
scale = high > width ? high - width : 0;
if (scale) {
/* If we're scaling, round up to get a closer result */
@@ -1096,8 +1097,8 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
scale++;
}
- val = ipa_reg_encode(reg, TIMER_SCALE, scale);
- val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
+ val = reg_encode(reg, TIMER_SCALE, scale);
+ val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
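A worked example of the IPA v4.2 base/scale encoding above (the 5-bit TIMER_BASE_VALUE width is assumed for illustration):
	/* For ticks = 1000: high = fls(1000) = 10, so scale = 10 - 5 = 5.
	 * Rounding adds 1 << 4 = 16, giving 1016 (fls() is still 10),
	 * and the stored base is 1016 >> 5 = 31.  Hardware counts
	 * base << scale = 992 ticks, within half a scale step of the
	 * requested 1000.
	 */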
@@ -1108,14 +1109,14 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
val = hol_block_timer_encode(ipa, reg, microseconds);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void
@@ -1123,13 +1124,13 @@ ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
- offset = ipa_reg_n_offset(reg, endpoint_id);
- val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
+ offset = reg_n_offset(reg, endpoint_id);
+ val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;
iowrite32(val, ipa->reg_virt + offset);
@@ -1170,7 +1171,7 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
if (!endpoint->toward_ipa)
@@ -1182,7 +1183,7 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
@@ -1190,20 +1191,20 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
u32 resource_group = endpoint->config.resource_group;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
- val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
+ val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
@@ -1212,14 +1213,14 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
reg = ipa_reg(ipa, ENDP_INIT_SEQ);
/* Low-order byte configures primary packet processing */
- val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
+ val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
/* Second byte (if supported) configures replicated packet processing */
if (ipa->version < IPA_VERSION_4_5)
- val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
- endpoint->config.tx.seq_rep_type);
+ val |= reg_encode(reg, SEQ_REP_TYPE,
+ endpoint->config.tx.seq_rep_type);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/**
@@ -1269,12 +1270,12 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_STATUS);
if (endpoint->config.status_enable) {
- val |= ipa_reg_bit(reg, STATUS_EN);
+ val |= reg_bit(reg, STATUS_EN);
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
@@ -1282,8 +1283,7 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
- val |= ipa_reg_encode(reg, STATUS_ENDP,
- status_endpoint_id);
+ val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
}
/* STATUS_LOCATION is 0, meaning IPA packet status
* precedes the packet (not present for IPA v4.5+)
@@ -1291,7 +1291,7 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
@@ -1635,18 +1635,18 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
- val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
- val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
+ val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
/* ROUTE_DEF_HDR_OFST is 0 */
- val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
- val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
+ val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
@@ -1984,8 +1984,9 @@ void ipa_endpoint_deconfig(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 endpoint_id;
+ u32 hw_limit;
u32 tx_count;
u32 rx_count;
u32 rx_base;
@@ -2017,12 +2018,12 @@ int ipa_endpoint_config(struct ipa *ipa)
* the highest one doesn't exceed the number supported by software.
*/
reg = ipa_reg(ipa, FLAVOR_0);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ val = ioread32(ipa->reg_virt + reg_offset(reg));
/* Our RX is an IPA producer; our TX is an IPA consumer. */
- tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
- rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
- rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
+ tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
+ rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
+ rx_base = reg_decode(reg, PROD_LOWEST, val);
limit = rx_base + rx_count;
if (limit > IPA_ENDPOINT_MAX) {
@@ -2031,6 +2032,14 @@ int ipa_endpoint_config(struct ipa *ipa)
return -EINVAL;
}
+ /* Until IPA v5.0, the max endpoint ID was 32 */
+ hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
+ if (limit > hw_limit) {
+ dev_err(dev, "unexpected endpoint count, %u > %u\n",
+ limit, hw_limit);
+ return -EINVAL;
+ }
+
/* Allocate and initialize the available endpoint bitmap */
ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
if (!ipa->available)
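To make the FLAVOR_0 decode above concrete, one illustrative reading (register values assumed, not from this patch):
	/* If MAX_CONS_PIPES decodes to 10, MAX_PROD_PIPES to 8 and
	 * PROD_LOWEST to 10, then tx_count = 10, rx_count = 8 and
	 * rx_base = 10; limit = 18 endpoint IDs must fit both under
	 * IPA_ENDPOINT_MAX and under hw_limit (32 before IPA v5.0).
	 */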